* [PATCH 01/24] linux-user: Use assert in mmap_fork_start
2023-06-30 13:21 [PATCH for-8.1? 00/24] linux-user: mmap range fixes Richard Henderson
@ 2023-06-30 13:21 ` Richard Henderson
2023-07-03 8:47 ` Alex Bennée
2023-07-03 15:56 ` Philippe Mathieu-Daudé
2023-06-30 13:21 ` [PATCH 02/24] linux-user: Fix formatting of mmap.c Richard Henderson
` (22 subsequent siblings)
23 siblings, 2 replies; 43+ messages in thread
From: Richard Henderson @ 2023-06-30 13:21 UTC (permalink / raw)
To: qemu-devel; +Cc: mjt, laurent
Assert is preferred over if+abort for the error message.
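As a rough standalone illustration of the difference in diagnostics (not part
of the patch, names reused only for the example):

    #include <assert.h>
    #include <stdlib.h>

    static int mmap_lock_count;

    void fork_start_old(void)
    {
        if (mmap_lock_count) {
            abort();    /* terminates with no hint about which check failed */
        }
    }

    void fork_start_new(void)
    {
        /* on failure, reports the expression, file and line, then aborts */
        assert(mmap_lock_count == 0);
    }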
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
linux-user/mmap.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index 0aa8ae7356..83cbcce769 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -49,8 +49,7 @@ bool have_mmap_lock(void)
/* Grab lock to make sure things are in a consistent state after fork(). */
void mmap_fork_start(void)
{
- if (mmap_lock_count)
- abort();
+ assert(mmap_lock_count == 0);
pthread_mutex_lock(&mmap_mutex);
}
--
2.34.1
* [PATCH 02/24] linux-user: Fix formatting of mmap.c
2023-06-30 13:21 [PATCH for-8.1? 00/24] linux-user: mmap range fixes Richard Henderson
2023-06-30 13:21 ` [PATCH 01/24] linux-user: Use assert in mmap_fork_start Richard Henderson
@ 2023-06-30 13:21 ` Richard Henderson
2023-07-03 8:48 ` Alex Bennée
2023-07-03 15:55 ` Philippe Mathieu-Daudé
2023-06-30 13:21 ` [PATCH 03/24] linux-user/strace: Expand struct flags to hold a mask Richard Henderson
` (21 subsequent siblings)
23 siblings, 2 replies; 43+ messages in thread
From: Richard Henderson @ 2023-06-30 13:21 UTC (permalink / raw)
To: qemu-devel; +Cc: mjt, laurent
Fix all checkpatch.pl errors within mmap.c.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
linux-user/mmap.c | 199 ++++++++++++++++++++++++++++------------------
1 file changed, 122 insertions(+), 77 deletions(-)
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index 83cbcce769..cb5369d2d1 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -55,10 +55,11 @@ void mmap_fork_start(void)
void mmap_fork_end(int child)
{
- if (child)
+ if (child) {
pthread_mutex_init(&mmap_mutex, NULL);
- else
+ } else {
pthread_mutex_unlock(&mmap_mutex);
+ }
}
/*
@@ -202,40 +203,47 @@ static int mmap_frag(abi_ulong real_start,
/* get the protection of the target pages outside the mapping */
prot1 = 0;
- for(addr = real_start; addr < real_end; addr++) {
- if (addr < start || addr >= end)
+ for (addr = real_start; addr < real_end; addr++) {
+ if (addr < start || addr >= end) {
prot1 |= page_get_flags(addr);
+ }
}
if (prot1 == 0) {
/* no page was there, so we allocate one */
void *p = mmap(host_start, qemu_host_page_size, prot,
flags | MAP_ANONYMOUS, -1, 0);
- if (p == MAP_FAILED)
+ if (p == MAP_FAILED) {
return -1;
+ }
prot1 = prot;
}
prot1 &= PAGE_BITS;
prot_new = prot | prot1;
if (!(flags & MAP_ANONYMOUS)) {
- /* msync() won't work here, so we return an error if write is
- possible while it is a shared mapping */
- if ((flags & MAP_TYPE) == MAP_SHARED &&
- (prot & PROT_WRITE))
+ /*
+ * msync() won't work here, so we return an error if write is
+ * possible while it is a shared mapping.
+ */
+ if ((flags & MAP_TYPE) == MAP_SHARED && (prot & PROT_WRITE)) {
return -1;
+ }
/* adjust protection to be able to read */
- if (!(prot1 & PROT_WRITE))
+ if (!(prot1 & PROT_WRITE)) {
mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
+ }
/* read the corresponding file data */
- if (pread(fd, g2h_untagged(start), end - start, offset) == -1)
+ if (pread(fd, g2h_untagged(start), end - start, offset) == -1) {
return -1;
+ }
/* put final protection */
- if (prot_new != (prot1 | PROT_WRITE))
+ if (prot_new != (prot1 | PROT_WRITE)) {
mprotect(host_start, qemu_host_page_size, prot_new);
+ }
} else {
if (prot_new != prot1) {
mprotect(host_start, qemu_host_page_size, prot_new);
@@ -264,8 +272,10 @@ abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;
unsigned long last_brk;
-/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
- of guest address space. */
+/*
+ * Subroutine of mmap_find_vma, used when we have pre-allocated
+ * a chunk of guest address space.
+ */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
abi_ulong align)
{
@@ -355,15 +365,17 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
* - shmat() with SHM_REMAP flag
*/
ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
- MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
+ MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
/* ENOMEM, if host address space has no memory */
if (ptr == MAP_FAILED) {
return (abi_ulong)-1;
}
- /* Count the number of sequential returns of the same address.
- This is used to modify the search algorithm below. */
+ /*
+ * Count the number of sequential returns of the same address.
+ * This is used to modify the search algorithm below.
+ */
repeat = (ptr == prev ? repeat + 1 : 0);
if (h2g_valid(ptr + size - 1)) {
@@ -380,14 +392,18 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
/* The address is not properly aligned for the target. */
switch (repeat) {
case 0:
- /* Assume the result that the kernel gave us is the
- first with enough free space, so start again at the
- next higher target page. */
+ /*
+ * Assume the result that the kernel gave us is the
+ * first with enough free space, so start again at the
+ * next higher target page.
+ */
addr = ROUND_UP(addr, align);
break;
case 1:
- /* Sometimes the kernel decides to perform the allocation
- at the top end of memory instead. */
+ /*
+ * Sometimes the kernel decides to perform the allocation
+ * at the top end of memory instead.
+ */
addr &= -align;
break;
case 2:
@@ -400,8 +416,10 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
break;
}
} else {
- /* Since the result the kernel gave didn't fit, start
- again at low memory. If any repetition, fail. */
+ /*
+ * Since the result the kernel gave didn't fit, start
+ * again at low memory. If any repetition, fail.
+ */
addr = (repeat ? -1 : 0);
}
@@ -416,8 +434,10 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
return (abi_ulong)-1;
}
wrapped = 1;
- /* Don't actually use 0 when wrapping, instead indicate
- that we'd truly like an allocation in low memory. */
+ /*
+ * Don't actually use 0 when wrapping, instead indicate
+ * that we'd truly like an allocation in low memory.
+ */
addr = (mmap_min_addr > TARGET_PAGE_SIZE
? TARGET_PAGE_ALIGN(mmap_min_addr)
: TARGET_PAGE_SIZE);
@@ -478,8 +498,10 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
real_start = start & qemu_host_page_mask;
host_offset = offset & qemu_host_page_mask;
- /* If the user is asking for the kernel to find a location, do that
- before we truncate the length for mapping files below. */
+ /*
+ * If the user is asking for the kernel to find a location, do that
+ * before we truncate the length for mapping files below.
+ */
if (!(flags & MAP_FIXED)) {
host_len = len + offset - host_offset;
host_len = HOST_PAGE_ALIGN(host_len);
@@ -490,32 +512,36 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
}
}
- /* When mapping files into a memory area larger than the file, accesses
- to pages beyond the file size will cause a SIGBUS.
-
- For example, if mmaping a file of 100 bytes on a host with 4K pages
- emulating a target with 8K pages, the target expects to be able to
- access the first 8K. But the host will trap us on any access beyond
- 4K.
-
- When emulating a target with a larger page-size than the hosts, we
- may need to truncate file maps at EOF and add extra anonymous pages
- up to the targets page boundary. */
-
+ /*
+ * When mapping files into a memory area larger than the file, accesses
+ * to pages beyond the file size will cause a SIGBUS.
+ *
+ * For example, if mmaping a file of 100 bytes on a host with 4K pages
+ * emulating a target with 8K pages, the target expects to be able to
+ * access the first 8K. But the host will trap us on any access beyond
+ * 4K.
+ *
+ * When emulating a target with a larger page-size than the hosts, we
+ * may need to truncate file maps at EOF and add extra anonymous pages
+ * up to the targets page boundary.
+ */
if ((qemu_real_host_page_size() < qemu_host_page_size) &&
!(flags & MAP_ANONYMOUS)) {
struct stat sb;
- if (fstat (fd, &sb) == -1)
- goto fail;
+ if (fstat(fd, &sb) == -1) {
+ goto fail;
+ }
- /* Are we trying to create a map beyond EOF?. */
- if (offset + len > sb.st_size) {
- /* If so, truncate the file map at eof aligned with
- the hosts real pagesize. Additional anonymous maps
- will be created beyond EOF. */
- len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
- }
+ /* Are we trying to create a map beyond EOF?. */
+ if (offset + len > sb.st_size) {
+ /*
+ * If so, truncate the file map at eof aligned with
+ * the hosts real pagesize. Additional anonymous maps
+ * will be created beyond EOF.
+ */
+ len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
+ }
}
if (!(flags & MAP_FIXED)) {
@@ -525,9 +551,11 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
host_len = len + offset - host_offset;
host_len = HOST_PAGE_ALIGN(host_len);
- /* Note: we prefer to control the mapping address. It is
- especially important if qemu_host_page_size >
- qemu_real_host_page_size */
+ /*
+ * Note: we prefer to control the mapping address. It is
+ * especially important if qemu_host_page_size >
+ * qemu_real_host_page_size.
+ */
p = mmap(g2h_untagged(start), host_len, host_prot,
flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
if (p == MAP_FAILED) {
@@ -565,45 +593,52 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
goto fail;
}
- /* worst case: we cannot map the file because the offset is not
- aligned, so we read it */
+ /*
+ * worst case: we cannot map the file because the offset is not
+ * aligned, so we read it
+ */
if (!(flags & MAP_ANONYMOUS) &&
(offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
- /* msync() won't work here, so we return an error if write is
- possible while it is a shared mapping */
- if ((flags & MAP_TYPE) == MAP_SHARED &&
- (host_prot & PROT_WRITE)) {
+ /*
+ * msync() won't work here, so we return an error if write is
+ * possible while it is a shared mapping
+ */
+ if ((flags & MAP_TYPE) == MAP_SHARED && (host_prot & PROT_WRITE)) {
errno = EINVAL;
goto fail;
}
retaddr = target_mmap(start, len, target_prot | PROT_WRITE,
MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
-1, 0);
- if (retaddr == -1)
+ if (retaddr == -1) {
goto fail;
- if (pread(fd, g2h_untagged(start), len, offset) == -1)
+ }
+ if (pread(fd, g2h_untagged(start), len, offset) == -1) {
goto fail;
+ }
if (!(host_prot & PROT_WRITE)) {
ret = target_mprotect(start, len, target_prot);
assert(ret == 0);
}
goto the_end;
}
-
+
/* handle the start of the mapping */
if (start > real_start) {
if (real_end == real_start + qemu_host_page_size) {
/* one single host page */
ret = mmap_frag(real_start, start, end,
host_prot, flags, fd, offset);
- if (ret == -1)
+ if (ret == -1) {
goto fail;
+ }
goto the_end1;
}
ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
host_prot, flags, fd, offset);
- if (ret == -1)
+ if (ret == -1) {
goto fail;
+ }
real_start += qemu_host_page_size;
}
/* handle the end of the mapping */
@@ -612,8 +647,9 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
real_end - qemu_host_page_size, end,
host_prot, flags, fd,
offset + real_end - qemu_host_page_size - start);
- if (ret == -1)
+ if (ret == -1) {
goto fail;
+ }
real_end -= qemu_host_page_size;
}
@@ -621,14 +657,16 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
if (real_start < real_end) {
void *p;
unsigned long offset1;
- if (flags & MAP_ANONYMOUS)
+ if (flags & MAP_ANONYMOUS) {
offset1 = 0;
- else
+ } else {
offset1 = offset + real_start - start;
+ }
p = mmap(g2h_untagged(real_start), real_end - real_start,
host_prot, flags, fd, offset1);
- if (p == MAP_FAILED)
+ if (p == MAP_FAILED) {
goto fail;
+ }
passthrough_start = real_start;
passthrough_end = real_end;
}
@@ -690,16 +728,18 @@ static void mmap_reserve(abi_ulong start, abi_ulong size)
}
end = real_end;
}
- if (prot != 0)
+ if (prot != 0) {
real_start += qemu_host_page_size;
+ }
}
if (end < real_end) {
prot = 0;
for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
prot |= page_get_flags(addr);
}
- if (prot != 0)
+ if (prot != 0) {
real_end -= qemu_host_page_size;
+ }
}
if (real_start != real_end) {
mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
@@ -715,8 +755,9 @@ int target_munmap(abi_ulong start, abi_ulong len)
trace_target_munmap(start, len);
- if (start & ~TARGET_PAGE_MASK)
+ if (start & ~TARGET_PAGE_MASK) {
return -TARGET_EINVAL;
+ }
len = TARGET_PAGE_ALIGN(len);
if (len == 0 || !guest_range_valid_untagged(start, len)) {
return -TARGET_EINVAL;
@@ -730,25 +771,27 @@ int target_munmap(abi_ulong start, abi_ulong len)
if (start > real_start) {
/* handle host page containing start */
prot = 0;
- for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
+ for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
prot |= page_get_flags(addr);
}
if (real_end == real_start + qemu_host_page_size) {
- for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
+ for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
prot |= page_get_flags(addr);
}
end = real_end;
}
- if (prot != 0)
+ if (prot != 0) {
real_start += qemu_host_page_size;
+ }
}
if (end < real_end) {
prot = 0;
- for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
+ for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
prot |= page_get_flags(addr);
}
- if (prot != 0)
+ if (prot != 0) {
real_end -= qemu_host_page_size;
+ }
}
ret = 0;
@@ -791,8 +834,10 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
flags, g2h_untagged(new_addr));
if (reserved_va && host_addr != MAP_FAILED) {
- /* If new and old addresses overlap then the above mremap will
- already have failed with EINVAL. */
+ /*
+ * If new and old addresses overlap then the above mremap will
+ * already have failed with EINVAL.
+ */
mmap_reserve(old_addr, old_size);
}
} else if (flags & MREMAP_MAYMOVE) {
--
2.34.1
* [PATCH 03/24] linux-user/strace: Expand struct flags to hold a mask
2023-06-30 13:21 [PATCH for-8.1? 00/24] linux-user: mmap range fixes Richard Henderson
2023-06-30 13:21 ` [PATCH 01/24] linux-user: Use assert in mmap_fork_start Richard Henderson
2023-06-30 13:21 ` [PATCH 02/24] linux-user: Fix formatting of mmap.c Richard Henderson
@ 2023-06-30 13:21 ` Richard Henderson
2023-07-03 9:24 ` Alex Bennée
2023-06-30 13:21 ` [PATCH 04/24] linux-user: Split TARGET_MAP_* out of syscall_defs.h Richard Henderson
` (20 subsequent siblings)
23 siblings, 1 reply; 43+ messages in thread
From: Richard Henderson @ 2023-06-30 13:21 UTC (permalink / raw)
To: qemu-devel; +Cc: mjt, laurent
A zero bit value does not make sense -- it must relate to
some field in some way.
Define FLAG_BASIC with a build-time sanity check.
Adjust FLAG_GENERIC and FLAG_TARGET to use it.
Add FLAG_GENERIC_MASK and FLAG_TARGET_MASK.
Fix up the existing flag definitions for build errors.
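A minimal sketch of the build-time idiom (simplified; QEMU's actual
QEMU_BUILD_BUG_ON_ZERO differs in detail):

    /* Evaluates to 0, but fails to compile when COND is true. */
    #define BUILD_BUG_ON_ZERO_SKETCH(COND) \
        (sizeof(struct { int : (COND) ? -1 : 1; }) * 0)

    struct flags {
        long f_value;
        long f_mask;
        const char *f_string;
    };

    /* A zero mask gives the bitfield a negative width and breaks the build. */
    #define FLAG_BASIC(V, M, N) \
        { V, (M) | BUILD_BUG_ON_ZERO_SKETCH(!(M)), N }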
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
linux-user/strace.c | 40 ++++++++++++++++++++++------------------
1 file changed, 22 insertions(+), 18 deletions(-)
diff --git a/linux-user/strace.c b/linux-user/strace.c
index aad2b62ca4..566396d051 100644
--- a/linux-user/strace.c
+++ b/linux-user/strace.c
@@ -46,15 +46,21 @@ struct syscallname {
*/
struct flags {
abi_long f_value; /* flag */
+ abi_long f_mask; /* mask */
const char *f_string; /* stringified flag */
};
+/* No 'struct flags' element should have a zero mask. */
+#define FLAG_BASIC(V, M, N) { V, M | QEMU_BUILD_BUG_ON_ZERO(!(M)), N }
+
/* common flags for all architectures */
-#define FLAG_GENERIC(name) { name, #name }
+#define FLAG_GENERIC_MASK(V, M) FLAG_BASIC(V, M, #V)
+#define FLAG_GENERIC(V) FLAG_BASIC(V, V, #V)
/* target specific flags (syscall_defs.h has TARGET_<flag>) */
-#define FLAG_TARGET(name) { TARGET_ ## name, #name }
+#define FLAG_TARGET_MASK(V, M) FLAG_BASIC(TARGET_##V, TARGET_##M, #V)
+#define FLAG_TARGET(V) FLAG_BASIC(TARGET_##V, TARGET_##V, #V)
/* end of flags array */
-#define FLAG_END { 0, NULL }
+#define FLAG_END { 0, 0, NULL }
/* Structure used to translate enumerated values into strings */
struct enums {
@@ -963,7 +969,7 @@ print_syscall_ret_ioctl(CPUArchState *cpu_env, const struct syscallname *name,
#endif
UNUSED static const struct flags access_flags[] = {
- FLAG_GENERIC(F_OK),
+ FLAG_GENERIC_MASK(F_OK, R_OK | W_OK | X_OK),
FLAG_GENERIC(R_OK),
FLAG_GENERIC(W_OK),
FLAG_GENERIC(X_OK),
@@ -999,9 +1005,9 @@ UNUSED static const struct flags mode_flags[] = {
};
UNUSED static const struct flags open_access_flags[] = {
- FLAG_TARGET(O_RDONLY),
- FLAG_TARGET(O_WRONLY),
- FLAG_TARGET(O_RDWR),
+ FLAG_TARGET_MASK(O_RDONLY, O_ACCMODE),
+ FLAG_TARGET_MASK(O_WRONLY, O_ACCMODE),
+ FLAG_TARGET_MASK(O_RDWR, O_ACCMODE),
FLAG_END,
};
@@ -1010,7 +1016,9 @@ UNUSED static const struct flags open_flags[] = {
FLAG_TARGET(O_CREAT),
FLAG_TARGET(O_DIRECTORY),
FLAG_TARGET(O_EXCL),
+#if TARGET_O_LARGEFILE != 0
FLAG_TARGET(O_LARGEFILE),
+#endif
FLAG_TARGET(O_NOCTTY),
FLAG_TARGET(O_NOFOLLOW),
FLAG_TARGET(O_NONBLOCK), /* also O_NDELAY */
@@ -1075,7 +1083,7 @@ UNUSED static const struct flags umount2_flags[] = {
};
UNUSED static const struct flags mmap_prot_flags[] = {
- FLAG_GENERIC(PROT_NONE),
+ FLAG_GENERIC_MASK(PROT_NONE, PROT_READ | PROT_WRITE | PROT_EXEC),
FLAG_GENERIC(PROT_EXEC),
FLAG_GENERIC(PROT_READ),
FLAG_GENERIC(PROT_WRITE),
@@ -1103,7 +1111,7 @@ UNUSED static const struct flags mmap_flags[] = {
#ifdef MAP_POPULATE
FLAG_TARGET(MAP_POPULATE),
#endif
-#ifdef TARGET_MAP_UNINITIALIZED
+#if defined(TARGET_MAP_UNINITIALIZED) && TARGET_MAP_UNINITIALIZED != 0
FLAG_TARGET(MAP_UNINITIALIZED),
#endif
FLAG_TARGET(MAP_HUGETLB),
@@ -1201,13 +1209,13 @@ UNUSED static const struct flags statx_flags[] = {
FLAG_GENERIC(AT_SYMLINK_NOFOLLOW),
#endif
#ifdef AT_STATX_SYNC_AS_STAT
- FLAG_GENERIC(AT_STATX_SYNC_AS_STAT),
+ FLAG_GENERIC_MASK(AT_STATX_SYNC_AS_STAT, AT_STATX_SYNC_TYPE),
#endif
#ifdef AT_STATX_FORCE_SYNC
- FLAG_GENERIC(AT_STATX_FORCE_SYNC),
+ FLAG_GENERIC_MASK(AT_STATX_FORCE_SYNC, AT_STATX_SYNC_TYPE),
#endif
#ifdef AT_STATX_DONT_SYNC
- FLAG_GENERIC(AT_STATX_DONT_SYNC),
+ FLAG_GENERIC_MASK(AT_STATX_DONT_SYNC, AT_STATX_SYNC_TYPE),
#endif
FLAG_END,
};
@@ -1481,14 +1489,10 @@ print_flags(const struct flags *f, abi_long flags, int last)
const char *sep = "";
int n;
- if ((flags == 0) && (f->f_value == 0)) {
- qemu_log("%s%s", f->f_string, get_comma(last));
- return;
- }
for (n = 0; f->f_string != NULL; f++) {
- if ((f->f_value != 0) && ((flags & f->f_value) == f->f_value)) {
+ if ((flags & f->f_mask) == f->f_value) {
qemu_log("%s%s", sep, f->f_string);
- flags &= ~f->f_value;
+ flags &= ~f->f_mask;
sep = "|";
n++;
}
--
2.34.1
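The practical effect of the mask-based match above can be seen with the open(2)
access modes; a standalone sketch using the usual Linux values (not QEMU code):

    #define O_ACCMODE 0003
    #define O_RDONLY  0000
    #define O_WRONLY  0001
    #define O_RDWR    0002
    #define O_CREAT   0100

    /* Old rule: a zero-valued flag never matched unless flags was 0 overall. */
    int old_match_rdonly(int flags)
    {
        return O_RDONLY != 0 && (flags & O_RDONLY) == O_RDONLY;   /* always 0 */
    }

    /* New rule: compare the whole access-mode field against the value. */
    int new_match_rdonly(int flags)
    {
        return (flags & O_ACCMODE) == O_RDONLY;
    }

    /* old_match_rdonly(O_RDONLY | O_CREAT) == 0,
       new_match_rdonly(O_RDONLY | O_CREAT) == 1 */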
* Re: [PATCH 03/24] linux-user/strace: Expand struct flags to hold a mask
2023-06-30 13:21 ` [PATCH 03/24] linux-user/strace: Expand struct flags to hold a mask Richard Henderson
@ 2023-07-03 9:24 ` Alex Bennée
0 siblings, 0 replies; 43+ messages in thread
From: Alex Bennée @ 2023-07-03 9:24 UTC (permalink / raw)
To: Richard Henderson; +Cc: mjt, laurent, qemu-devel
Richard Henderson <richard.henderson@linaro.org> writes:
> A zero bit value does not make sense -- it must relate to
> some field in some way.
>
> Define FLAG_BASIC with a build-time sanity check.
> Adjust FLAG_GENERIC and FLAG_TARGET to use it.
> Add FLAG_GENERIC_MASK and FLAG_TARGET_MASK.
>
> Fix up the existing flag definitions for build errors.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
--
Alex Bennée
Virtualisation Tech Lead @ Linaro
* [PATCH 04/24] linux-user: Split TARGET_MAP_* out of syscall_defs.h
2023-06-30 13:21 [PATCH for-8.1? 00/24] linux-user: mmap range fixes Richard Henderson
` (2 preceding siblings ...)
2023-06-30 13:21 ` [PATCH 03/24] linux-user/strace: Expand struct flags to hold a mask Richard Henderson
@ 2023-06-30 13:21 ` Richard Henderson
2023-07-03 9:25 ` Alex Bennée
2023-06-30 13:21 ` [PATCH 05/24] linux-user: Split TARGET_PROT_* " Richard Henderson
` (19 subsequent siblings)
23 siblings, 1 reply; 43+ messages in thread
From: Richard Henderson @ 2023-06-30 13:21 UTC (permalink / raw)
To: qemu-devel; +Cc: mjt, laurent
Move the values into the per-target target_mman.h headers
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
linux-user/alpha/target_mman.h | 13 +++++
linux-user/generic/target_mman.h | 54 ++++++++++++++++++++
linux-user/hppa/target_mman.h | 10 ++++
linux-user/mips/target_mman.h | 11 +++++
linux-user/mips64/target_mman.h | 2 +-
linux-user/ppc/target_mman.h | 3 ++
linux-user/sparc/target_mman.h | 4 ++
linux-user/syscall_defs.h | 85 +-------------------------------
linux-user/xtensa/target_mman.h | 11 +++++
9 files changed, 108 insertions(+), 85 deletions(-)
diff --git a/linux-user/alpha/target_mman.h b/linux-user/alpha/target_mman.h
index 051544f5ab..6bb03e7336 100644
--- a/linux-user/alpha/target_mman.h
+++ b/linux-user/alpha/target_mman.h
@@ -1,6 +1,19 @@
#ifndef ALPHA_TARGET_MMAN_H
#define ALPHA_TARGET_MMAN_H
+#define TARGET_MAP_ANONYMOUS 0x10
+#define TARGET_MAP_FIXED 0x100
+#define TARGET_MAP_GROWSDOWN 0x01000
+#define TARGET_MAP_DENYWRITE 0x02000
+#define TARGET_MAP_EXECUTABLE 0x04000
+#define TARGET_MAP_LOCKED 0x08000
+#define TARGET_MAP_NORESERVE 0x10000
+#define TARGET_MAP_POPULATE 0x20000
+#define TARGET_MAP_NONBLOCK 0x40000
+#define TARGET_MAP_STACK 0x80000
+#define TARGET_MAP_HUGETLB 0x100000
+#define TARGET_MAP_FIXED_NOREPLACE 0x200000
+
#define TARGET_MADV_DONTNEED 6
#define TARGET_MS_ASYNC 1
diff --git a/linux-user/generic/target_mman.h b/linux-user/generic/target_mman.h
index 32bf1a52d0..7b888fb7f8 100644
--- a/linux-user/generic/target_mman.h
+++ b/linux-user/generic/target_mman.h
@@ -1,6 +1,60 @@
#ifndef LINUX_USER_TARGET_MMAN_H
#define LINUX_USER_TARGET_MMAN_H
+/* These are defined in linux/mmap.h */
+#define TARGET_MAP_SHARED 0x01
+#define TARGET_MAP_PRIVATE 0x02
+#define TARGET_MAP_SHARED_VALIDATE 0x03
+
+/* 0x0100 - 0x4000 flags are defined in asm-generic/mman.h */
+#ifndef TARGET_MAP_GROWSDOWN
+#define TARGET_MAP_GROWSDOWN 0x0100
+#endif
+#ifndef TARGET_MAP_DENYWRITE
+#define TARGET_MAP_DENYWRITE 0x0800
+#endif
+#ifndef TARGET_MAP_EXECUTABLE
+#define TARGET_MAP_EXECUTABLE 0x1000
+#endif
+#ifndef TARGET_MAP_LOCKED
+#define TARGET_MAP_LOCKED 0x2000
+#endif
+#ifndef TARGET_MAP_NORESERVE
+#define TARGET_MAP_NORESERVE 0x4000
+#endif
+
+/* Other MAP flags are defined in asm-generic/mman-common.h */
+#ifndef TARGET_MAP_TYPE
+#define TARGET_MAP_TYPE 0x0f
+#endif
+#ifndef TARGET_MAP_FIXED
+#define TARGET_MAP_FIXED 0x10
+#endif
+#ifndef TARGET_MAP_ANONYMOUS
+#define TARGET_MAP_ANONYMOUS 0x20
+#endif
+#ifndef TARGET_MAP_POPULATE
+#define TARGET_MAP_POPULATE 0x008000
+#endif
+#ifndef TARGET_MAP_NONBLOCK
+#define TARGET_MAP_NONBLOCK 0x010000
+#endif
+#ifndef TARGET_MAP_STACK
+#define TARGET_MAP_STACK 0x020000
+#endif
+#ifndef TARGET_MAP_HUGETLB
+#define TARGET_MAP_HUGETLB 0x040000
+#endif
+#ifndef TARGET_MAP_SYNC
+#define TARGET_MAP_SYNC 0x080000
+#endif
+#ifndef TARGET_MAP_FIXED_NOREPLACE
+#define TARGET_MAP_FIXED_NOREPLACE 0x100000
+#endif
+#ifndef TARGET_MAP_UNINITIALIZED
+#define TARGET_MAP_UNINITIALIZED 0x4000000
+#endif
+
#ifndef TARGET_MADV_NORMAL
#define TARGET_MADV_NORMAL 0
#endif
diff --git a/linux-user/hppa/target_mman.h b/linux-user/hppa/target_mman.h
index f9b6b97032..97f87d042a 100644
--- a/linux-user/hppa/target_mman.h
+++ b/linux-user/hppa/target_mman.h
@@ -1,6 +1,16 @@
#ifndef HPPA_TARGET_MMAN_H
#define HPPA_TARGET_MMAN_H
+#define TARGET_MAP_TYPE 0x2b
+#define TARGET_MAP_FIXED 0x04
+#define TARGET_MAP_ANONYMOUS 0x10
+#define TARGET_MAP_GROWSDOWN 0x8000
+#define TARGET_MAP_POPULATE 0x10000
+#define TARGET_MAP_NONBLOCK 0x20000
+#define TARGET_MAP_STACK 0x40000
+#define TARGET_MAP_HUGETLB 0x80000
+#define TARGET_MAP_UNINITIALIZED 0
+
#define TARGET_MADV_MERGEABLE 65
#define TARGET_MADV_UNMERGEABLE 66
#define TARGET_MADV_HUGEPAGE 67
diff --git a/linux-user/mips/target_mman.h b/linux-user/mips/target_mman.h
index e7ba6070fe..d1d96decf5 100644
--- a/linux-user/mips/target_mman.h
+++ b/linux-user/mips/target_mman.h
@@ -1 +1,12 @@
+#define TARGET_MAP_NORESERVE 0x0400
+#define TARGET_MAP_ANONYMOUS 0x0800
+#define TARGET_MAP_GROWSDOWN 0x1000
+#define TARGET_MAP_DENYWRITE 0x2000
+#define TARGET_MAP_EXECUTABLE 0x4000
+#define TARGET_MAP_LOCKED 0x8000
+#define TARGET_MAP_POPULATE 0x10000
+#define TARGET_MAP_NONBLOCK 0x20000
+#define TARGET_MAP_STACK 0x40000
+#define TARGET_MAP_HUGETLB 0x80000
+
#include "../generic/target_mman.h"
diff --git a/linux-user/mips64/target_mman.h b/linux-user/mips64/target_mman.h
index e7ba6070fe..7bdc47d902 100644
--- a/linux-user/mips64/target_mman.h
+++ b/linux-user/mips64/target_mman.h
@@ -1 +1 @@
-#include "../generic/target_mman.h"
+#include "../mips/target_mman.h"
diff --git a/linux-user/ppc/target_mman.h b/linux-user/ppc/target_mman.h
index e7ba6070fe..c90be347f6 100644
--- a/linux-user/ppc/target_mman.h
+++ b/linux-user/ppc/target_mman.h
@@ -1 +1,4 @@
+#define TARGET_MAP_NORESERVE 0x40
+#define TARGET_MAP_LOCKED 0x80
+
#include "../generic/target_mman.h"
diff --git a/linux-user/sparc/target_mman.h b/linux-user/sparc/target_mman.h
index e7ba6070fe..3fdee19d8a 100644
--- a/linux-user/sparc/target_mman.h
+++ b/linux-user/sparc/target_mman.h
@@ -1 +1,5 @@
+#define TARGET_MAP_NORESERVE 0x40
+#define TARGET_MAP_LOCKED 0x100
+#define TARGET_MAP_GROWSDOWN 0x0200
+
#include "../generic/target_mman.h"
diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h
index cc37054cb5..118a8ac7da 100644
--- a/linux-user/syscall_defs.h
+++ b/linux-user/syscall_defs.h
@@ -1262,90 +1262,7 @@ struct target_winsize {
#define TARGET_PROT_MTE 0x20
#endif
-/* Common */
-#define TARGET_MAP_SHARED 0x01 /* Share changes */
-#define TARGET_MAP_PRIVATE 0x02 /* Changes are private */
-#if defined(TARGET_HPPA)
-#define TARGET_MAP_TYPE 0x03 /* Mask for type of mapping */
-#else
-#define TARGET_MAP_TYPE 0x0f /* Mask for type of mapping */
-#endif
-
-/* Target specific */
-#if defined(TARGET_MIPS)
-#define TARGET_MAP_FIXED 0x10 /* Interpret addr exactly */
-#define TARGET_MAP_ANONYMOUS 0x0800 /* don't use a file */
-#define TARGET_MAP_GROWSDOWN 0x1000 /* stack-like segment */
-#define TARGET_MAP_DENYWRITE 0x2000 /* ETXTBSY */
-#define TARGET_MAP_EXECUTABLE 0x4000 /* mark it as an executable */
-#define TARGET_MAP_LOCKED 0x8000 /* pages are locked */
-#define TARGET_MAP_NORESERVE 0x0400 /* don't check for reservations */
-#define TARGET_MAP_POPULATE 0x10000 /* populate (prefault) pagetables */
-#define TARGET_MAP_NONBLOCK 0x20000 /* do not block on IO */
-#define TARGET_MAP_STACK 0x40000 /* ignored */
-#define TARGET_MAP_HUGETLB 0x80000 /* create a huge page mapping */
-#elif defined(TARGET_PPC)
-#define TARGET_MAP_FIXED 0x10 /* Interpret addr exactly */
-#define TARGET_MAP_ANONYMOUS 0x20 /* don't use a file */
-#define TARGET_MAP_GROWSDOWN 0x0100 /* stack-like segment */
-#define TARGET_MAP_DENYWRITE 0x0800 /* ETXTBSY */
-#define TARGET_MAP_EXECUTABLE 0x1000 /* mark it as an executable */
-#define TARGET_MAP_LOCKED 0x0080 /* pages are locked */
-#define TARGET_MAP_NORESERVE 0x0040 /* don't check for reservations */
-#define TARGET_MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
-#define TARGET_MAP_NONBLOCK 0x10000 /* do not block on IO */
-#define TARGET_MAP_STACK 0x20000 /* ignored */
-#define TARGET_MAP_HUGETLB 0x40000 /* create a huge page mapping */
-#elif defined(TARGET_ALPHA)
-#define TARGET_MAP_ANONYMOUS 0x10 /* don't use a file */
-#define TARGET_MAP_FIXED 0x100 /* Interpret addr exactly */
-#define TARGET_MAP_GROWSDOWN 0x01000 /* stack-like segment */
-#define TARGET_MAP_DENYWRITE 0x02000 /* ETXTBSY */
-#define TARGET_MAP_EXECUTABLE 0x04000 /* mark it as an executable */
-#define TARGET_MAP_LOCKED 0x08000 /* lock the mapping */
-#define TARGET_MAP_NORESERVE 0x10000 /* no check for reservations */
-#define TARGET_MAP_POPULATE 0x20000 /* pop (prefault) pagetables */
-#define TARGET_MAP_NONBLOCK 0x40000 /* do not block on IO */
-#define TARGET_MAP_STACK 0x80000 /* ignored */
-#define TARGET_MAP_HUGETLB 0x100000 /* create a huge page mapping */
-#elif defined(TARGET_HPPA)
-#define TARGET_MAP_ANONYMOUS 0x10 /* don't use a file */
-#define TARGET_MAP_FIXED 0x04 /* Interpret addr exactly */
-#define TARGET_MAP_GROWSDOWN 0x08000 /* stack-like segment */
-#define TARGET_MAP_DENYWRITE 0x00800 /* ETXTBSY */
-#define TARGET_MAP_EXECUTABLE 0x01000 /* mark it as an executable */
-#define TARGET_MAP_LOCKED 0x02000 /* lock the mapping */
-#define TARGET_MAP_NORESERVE 0x04000 /* no check for reservations */
-#define TARGET_MAP_POPULATE 0x10000 /* pop (prefault) pagetables */
-#define TARGET_MAP_NONBLOCK 0x20000 /* do not block on IO */
-#define TARGET_MAP_STACK 0x40000 /* ignored */
-#define TARGET_MAP_HUGETLB 0x80000 /* create a huge page mapping */
-#elif defined(TARGET_XTENSA)
-#define TARGET_MAP_FIXED 0x10 /* Interpret addr exactly */
-#define TARGET_MAP_ANONYMOUS 0x0800 /* don't use a file */
-#define TARGET_MAP_GROWSDOWN 0x1000 /* stack-like segment */
-#define TARGET_MAP_DENYWRITE 0x2000 /* ETXTBSY */
-#define TARGET_MAP_EXECUTABLE 0x4000 /* mark it as an executable */
-#define TARGET_MAP_LOCKED 0x8000 /* pages are locked */
-#define TARGET_MAP_NORESERVE 0x0400 /* don't check for reservations */
-#define TARGET_MAP_POPULATE 0x10000 /* populate (prefault) pagetables */
-#define TARGET_MAP_NONBLOCK 0x20000 /* do not block on IO */
-#define TARGET_MAP_STACK 0x40000
-#define TARGET_MAP_HUGETLB 0x80000 /* create a huge page mapping */
-#else
-#define TARGET_MAP_FIXED 0x10 /* Interpret addr exactly */
-#define TARGET_MAP_ANONYMOUS 0x20 /* don't use a file */
-#define TARGET_MAP_GROWSDOWN 0x0100 /* stack-like segment */
-#define TARGET_MAP_DENYWRITE 0x0800 /* ETXTBSY */
-#define TARGET_MAP_EXECUTABLE 0x1000 /* mark it as an executable */
-#define TARGET_MAP_LOCKED 0x2000 /* pages are locked */
-#define TARGET_MAP_NORESERVE 0x4000 /* don't check for reservations */
-#define TARGET_MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
-#define TARGET_MAP_NONBLOCK 0x10000 /* do not block on IO */
-#define TARGET_MAP_STACK 0x20000 /* ignored */
-#define TARGET_MAP_HUGETLB 0x40000 /* create a huge page mapping */
-#define TARGET_MAP_UNINITIALIZED 0x4000000 /* for anonymous mmap, memory could be uninitialized */
-#endif
+#include "target_mman.h"
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) \
|| (defined(TARGET_ARM) && defined(TARGET_ABI32)) \
diff --git a/linux-user/xtensa/target_mman.h b/linux-user/xtensa/target_mman.h
index e7ba6070fe..d1d96decf5 100644
--- a/linux-user/xtensa/target_mman.h
+++ b/linux-user/xtensa/target_mman.h
@@ -1 +1,12 @@
+#define TARGET_MAP_NORESERVE 0x0400
+#define TARGET_MAP_ANONYMOUS 0x0800
+#define TARGET_MAP_GROWSDOWN 0x1000
+#define TARGET_MAP_DENYWRITE 0x2000
+#define TARGET_MAP_EXECUTABLE 0x4000
+#define TARGET_MAP_LOCKED 0x8000
+#define TARGET_MAP_POPULATE 0x10000
+#define TARGET_MAP_NONBLOCK 0x20000
+#define TARGET_MAP_STACK 0x40000
+#define TARGET_MAP_HUGETLB 0x80000
+
#include "../generic/target_mman.h"
--
2.34.1
* [PATCH 05/24] linux-user: Split TARGET_PROT_* out of syscall_defs.h
2023-06-30 13:21 [PATCH for-8.1? 00/24] linux-user: mmap range fixes Richard Henderson
` (3 preceding siblings ...)
2023-06-30 13:21 ` [PATCH 04/24] linux-user: Split TARGET_MAP_* out of syscall_defs.h Richard Henderson
@ 2023-06-30 13:21 ` Richard Henderson
2023-07-03 9:32 ` Alex Bennée
2023-07-03 16:02 ` Philippe Mathieu-Daudé
2023-06-30 13:21 ` [PATCH 06/24] linux-user: Populate more bits in mmap_flags_tbl Richard Henderson
` (18 subsequent siblings)
23 siblings, 2 replies; 43+ messages in thread
From: Richard Henderson @ 2023-06-30 13:21 UTC (permalink / raw)
To: qemu-devel; +Cc: mjt, laurent
Move the values into the per-target target_mman.h headers
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
linux-user/aarch64/target_mman.h | 3 +++
linux-user/generic/target_mman.h | 4 ++++
linux-user/mips/target_mman.h | 2 ++
linux-user/syscall_defs.h | 11 -----------
linux-user/xtensa/target_mman.h | 2 ++
5 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/linux-user/aarch64/target_mman.h b/linux-user/aarch64/target_mman.h
index e7ba6070fe..7f15cab25e 100644
--- a/linux-user/aarch64/target_mman.h
+++ b/linux-user/aarch64/target_mman.h
@@ -1 +1,4 @@
+#define TARGET_PROT_BTI 0x10
+#define TARGET_PROT_MTE 0x20
+
#include "../generic/target_mman.h"
diff --git a/linux-user/generic/target_mman.h b/linux-user/generic/target_mman.h
index 7b888fb7f8..39a650e751 100644
--- a/linux-user/generic/target_mman.h
+++ b/linux-user/generic/target_mman.h
@@ -1,6 +1,10 @@
#ifndef LINUX_USER_TARGET_MMAN_H
#define LINUX_USER_TARGET_MMAN_H
+#ifndef TARGET_PROT_SEM
+#define TARGET_PROT_SEM 0x08
+#endif
+
/* These are defined in linux/mmap.h */
#define TARGET_MAP_SHARED 0x01
#define TARGET_MAP_PRIVATE 0x02
diff --git a/linux-user/mips/target_mman.h b/linux-user/mips/target_mman.h
index d1d96decf5..e9f3905a52 100644
--- a/linux-user/mips/target_mman.h
+++ b/linux-user/mips/target_mman.h
@@ -1,3 +1,5 @@
+#define TARGET_PROT_SEM 0x10
+
#define TARGET_MAP_NORESERVE 0x0400
#define TARGET_MAP_ANONYMOUS 0x0800
#define TARGET_MAP_GROWSDOWN 0x1000
diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h
index 118a8ac7da..9387ed422d 100644
--- a/linux-user/syscall_defs.h
+++ b/linux-user/syscall_defs.h
@@ -1251,17 +1251,6 @@ struct target_winsize {
#include "termbits.h"
-#if defined(TARGET_MIPS) || defined(TARGET_XTENSA)
-#define TARGET_PROT_SEM 0x10
-#else
-#define TARGET_PROT_SEM 0x08
-#endif
-
-#ifdef TARGET_AARCH64
-#define TARGET_PROT_BTI 0x10
-#define TARGET_PROT_MTE 0x20
-#endif
-
#include "target_mman.h"
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) \
diff --git a/linux-user/xtensa/target_mman.h b/linux-user/xtensa/target_mman.h
index d1d96decf5..e9f3905a52 100644
--- a/linux-user/xtensa/target_mman.h
+++ b/linux-user/xtensa/target_mman.h
@@ -1,3 +1,5 @@
+#define TARGET_PROT_SEM 0x10
+
#define TARGET_MAP_NORESERVE 0x0400
#define TARGET_MAP_ANONYMOUS 0x0800
#define TARGET_MAP_GROWSDOWN 0x1000
--
2.34.1
* Re: [PATCH 05/24] linux-user: Split TARGET_PROT_* out of syscall_defs.h
2023-06-30 13:21 ` [PATCH 05/24] linux-user: Split TARGET_PROT_* " Richard Henderson
@ 2023-07-03 9:32 ` Alex Bennée
2023-07-03 16:02 ` Philippe Mathieu-Daudé
1 sibling, 0 replies; 43+ messages in thread
From: Alex Bennée @ 2023-07-03 9:32 UTC (permalink / raw)
To: Richard Henderson; +Cc: mjt, laurent, qemu-devel
Richard Henderson <richard.henderson@linaro.org> writes:
> Move the values into the per-target target_mman.h headers
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
> linux-user/aarch64/target_mman.h | 3 +++
> linux-user/generic/target_mman.h | 4 ++++
> linux-user/mips/target_mman.h | 2 ++
> linux-user/syscall_defs.h | 11 -----------
> linux-user/xtensa/target_mman.h | 2 ++
> 5 files changed, 11 insertions(+), 11 deletions(-)
>
> diff --git a/linux-user/aarch64/target_mman.h b/linux-user/aarch64/target_mman.h
> index e7ba6070fe..7f15cab25e 100644
> --- a/linux-user/aarch64/target_mman.h
> +++ b/linux-user/aarch64/target_mman.h
> @@ -1 +1,4 @@
> +#define TARGET_PROT_BTI 0x10
> +#define TARGET_PROT_MTE 0x20
> +
> #include "../generic/target_mman.h"
> diff --git a/linux-user/generic/target_mman.h b/linux-user/generic/target_mman.h
> index 7b888fb7f8..39a650e751 100644
> --- a/linux-user/generic/target_mman.h
> +++ b/linux-user/generic/target_mman.h
> @@ -1,6 +1,10 @@
> #ifndef LINUX_USER_TARGET_MMAN_H
> #define LINUX_USER_TARGET_MMAN_H
>
> +#ifndef TARGET_PROT_SEM
> +#define TARGET_PROT_SEM 0x08
> +#endif
> +
I wonder if it is worth adding some commentary at the top that this file
should only be included from the arch specific target_mman.h as there is
an implicit ordering requirement for #ifndef to work properly.
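Something like this sketch of the ordering, taken from the mips case in the
patch (paths abbreviated):

    /* linux-user/mips/target_mman.h -- the override must come first */
    #define TARGET_PROT_SEM 0x10
    #include "../generic/target_mman.h"

    /* linux-user/generic/target_mman.h -- fallback applies only if nothing
       was defined before this point */
    #ifndef TARGET_PROT_SEM
    #define TARGET_PROT_SEM 0x08
    #endif

Including the generic header before the arch-specific #define would silently
pick up the 0x08 fallback.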
Anyway:
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
--
Alex Bennée
Virtualisation Tech Lead @ Linaro
* Re: [PATCH 05/24] linux-user: Split TARGET_PROT_* out of syscall_defs.h
2023-06-30 13:21 ` [PATCH 05/24] linux-user: Split TARGET_PROT_* " Richard Henderson
2023-07-03 9:32 ` Alex Bennée
@ 2023-07-03 16:02 ` Philippe Mathieu-Daudé
1 sibling, 0 replies; 43+ messages in thread
From: Philippe Mathieu-Daudé @ 2023-07-03 16:02 UTC (permalink / raw)
To: Richard Henderson, qemu-devel; +Cc: mjt, laurent
On 30/6/23 15:21, Richard Henderson wrote:
> Move the values into the per-target target_mman.h headers
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
> linux-user/aarch64/target_mman.h | 3 +++
> linux-user/generic/target_mman.h | 4 ++++
> linux-user/mips/target_mman.h | 2 ++
> linux-user/syscall_defs.h | 11 -----------
> linux-user/xtensa/target_mman.h | 2 ++
> 5 files changed, 11 insertions(+), 11 deletions(-)
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
* [PATCH 06/24] linux-user: Populate more bits in mmap_flags_tbl
2023-06-30 13:21 [PATCH for-8.1? 00/24] linux-user: mmap range fixes Richard Henderson
` (4 preceding siblings ...)
2023-06-30 13:21 ` [PATCH 05/24] linux-user: Split TARGET_PROT_* " Richard Henderson
@ 2023-06-30 13:21 ` Richard Henderson
2023-07-03 9:42 ` Alex Bennée
2023-06-30 13:21 ` [PATCH 07/24] accel/tcg: Introduce page_check_range_empty Richard Henderson
` (17 subsequent siblings)
23 siblings, 1 reply; 43+ messages in thread
From: Richard Henderson @ 2023-06-30 13:21 UTC (permalink / raw)
To: qemu-devel; +Cc: mjt, laurent
Fix translation of TARGET_MAP_SHARED and TARGET_MAP_PRIVATE,
which are types not single bits. Add TARGET_MAP_SHARED_VALIDATE,
TARGET_MAP_SYNC, TARGET_MAP_NONBLOCK, TARGET_MAP_POPULATE,
TARGET_MAP_FIXED_NOREPLACE, and TARGET_MAP_UNINITIALIZED.
Update strace to match.
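For illustration, with the generic values already present in target_mman.h
(a standalone sketch, not the QEMU code):

    #define TARGET_MAP_SHARED          0x01
    #define TARGET_MAP_PRIVATE         0x02
    #define TARGET_MAP_SHARED_VALIDATE 0x03   /* both low bits set */
    #define TARGET_MAP_TYPE            0x0f   /* mask for the whole type field */

    /* Per-bit translation sees both the SHARED and PRIVATE bits set in
       MAP_SHARED_VALIDATE; matching the field as a value avoids that: */
    int is_shared(int flags)
    {
        return (flags & TARGET_MAP_TYPE) == TARGET_MAP_SHARED;
    }

    int is_shared_validate(int flags)
    {
        return (flags & TARGET_MAP_TYPE) == TARGET_MAP_SHARED_VALIDATE;
    }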
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
linux-user/strace.c | 23 ++++++++++-------------
linux-user/syscall.c | 18 ++++++++++++++++--
2 files changed, 26 insertions(+), 15 deletions(-)
diff --git a/linux-user/strace.c b/linux-user/strace.c
index 566396d051..af5c5f135b 100644
--- a/linux-user/strace.c
+++ b/linux-user/strace.c
@@ -1094,28 +1094,25 @@ UNUSED static const struct flags mmap_prot_flags[] = {
};
UNUSED static const struct flags mmap_flags[] = {
- FLAG_TARGET(MAP_SHARED),
- FLAG_TARGET(MAP_PRIVATE),
+ FLAG_TARGET_MASK(MAP_SHARED, MAP_TYPE),
+ FLAG_TARGET_MASK(MAP_PRIVATE, MAP_TYPE),
+ FLAG_TARGET_MASK(MAP_SHARED_VALIDATE, MAP_TYPE),
FLAG_TARGET(MAP_ANONYMOUS),
FLAG_TARGET(MAP_DENYWRITE),
- FLAG_TARGET(MAP_FIXED),
- FLAG_TARGET(MAP_GROWSDOWN),
FLAG_TARGET(MAP_EXECUTABLE),
-#ifdef MAP_LOCKED
+ FLAG_TARGET(MAP_FIXED),
+ FLAG_TARGET(MAP_FIXED_NOREPLACE),
+ FLAG_TARGET(MAP_GROWSDOWN),
+ FLAG_TARGET(MAP_HUGETLB),
FLAG_TARGET(MAP_LOCKED),
-#endif
-#ifdef MAP_NONBLOCK
FLAG_TARGET(MAP_NONBLOCK),
-#endif
FLAG_TARGET(MAP_NORESERVE),
-#ifdef MAP_POPULATE
FLAG_TARGET(MAP_POPULATE),
-#endif
-#if defined(TARGET_MAP_UNINITIALIZED) && TARGET_MAP_UNINITIALIZED != 0
+ FLAG_TARGET(MAP_STACK),
+ FLAG_TARGET(MAP_SYNC),
+#if TARGET_MAP_UNINITIALIZED != 0
FLAG_TARGET(MAP_UNINITIALIZED),
#endif
- FLAG_TARGET(MAP_HUGETLB),
- FLAG_TARGET(MAP_STACK),
FLAG_END,
};
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index f2cb101d83..459a2d8bc7 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -6002,9 +6002,16 @@ static const StructEntry struct_termios_def = {
.print = print_termios,
};
+/* If the host does not provide these bits, they may be safely discarded. */
+#ifndef MAP_UNINITIALIZED
+#define MAP_UNINITIALIZED 0
+#endif
+
static const bitmask_transtbl mmap_flags_tbl[] = {
- { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
- { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
+ { TARGET_MAP_TYPE, TARGET_MAP_SHARED, MAP_TYPE, MAP_SHARED },
+ { TARGET_MAP_TYPE, TARGET_MAP_PRIVATE, MAP_TYPE, MAP_PRIVATE },
+ { TARGET_MAP_TYPE, TARGET_MAP_SHARED_VALIDATE,
+ MAP_TYPE, MAP_SHARED_VALIDATE },
{ TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
{ TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
MAP_ANONYMOUS, MAP_ANONYMOUS },
@@ -6022,6 +6029,13 @@ static const bitmask_transtbl mmap_flags_tbl[] = {
Recognize it for the target insofar as we do not want to pass
it through to the host. */
{ TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
+ { TARGET_MAP_SYNC, TARGET_MAP_SYNC, MAP_SYNC, MAP_SYNC },
+ { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
+ { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
+ { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
+ MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
+ { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
+ MAP_UNINITIALIZED, MAP_UNINITIALIZED },
{ 0, 0, 0, 0 }
};
--
2.34.1
* Re: [PATCH 06/24] linux-user: Populate more bits in mmap_flags_tbl
2023-06-30 13:21 ` [PATCH 06/24] linux-user: Populate more bits in mmap_flags_tbl Richard Henderson
@ 2023-07-03 9:42 ` Alex Bennée
0 siblings, 0 replies; 43+ messages in thread
From: Alex Bennée @ 2023-07-03 9:42 UTC (permalink / raw)
To: Richard Henderson; +Cc: mjt, laurent, qemu-devel
Richard Henderson <richard.henderson@linaro.org> writes:
> Fix translation of TARGET_MAP_SHARED and TARGET_MAP_PRIVATE,
> which are types not single bits. Add TARGET_MAP_SHARED_VALIDATE,
> TARGET_MAP_SYNC, TARGET_MAP_NONBLOCK, TARGET_MAP_POPULATE,
> TARGET_MAP_FIXED_NOREPLACE, and TARGET_MAP_UNINITIALIZED.
>
> Update strace to match.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
--
Alex Bennée
Virtualisation Tech Lead @ Linaro
* [PATCH 07/24] accel/tcg: Introduce page_check_range_empty
2023-06-30 13:21 [PATCH for-8.1? 00/24] linux-user: mmap range fixes Richard Henderson
` (5 preceding siblings ...)
2023-06-30 13:21 ` [PATCH 06/24] linux-user: Populate more bits in mmap_flags_tbl Richard Henderson
@ 2023-06-30 13:21 ` Richard Henderson
2023-07-03 9:45 ` Alex Bennée
2023-06-30 13:21 ` [PATCH 08/24] bsd-user: Use page_check_range_empty for MAP_EXCL Richard Henderson
` (16 subsequent siblings)
23 siblings, 1 reply; 43+ messages in thread
From: Richard Henderson @ 2023-06-30 13:21 UTC (permalink / raw)
To: qemu-devel; +Cc: mjt, laurent
Examine the interval tree to validate that a region
has no existing mappings.
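A sketch of the intended call pattern (the caller shown is illustrative only):

    mmap_lock();
    if (page_check_range_empty(start, start + len - 1)) {
        /* the range is free; install the new mapping before unlocking */
    }
    mmap_unlock();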
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/cpu-all.h | 11 +++++++++++
accel/tcg/user-exec.c | 7 +++++++
2 files changed, 18 insertions(+)
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 8018ce783e..5b2c230d52 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -224,6 +224,17 @@ void page_set_flags(target_ulong start, target_ulong last, int flags);
void page_reset_target_data(target_ulong start, target_ulong last);
int page_check_range(target_ulong start, target_ulong len, int flags);
+/**
+ * page_check_range_empty:
+ * @start: first byte of range
+ * @last: last byte of range
+ *
+ * Return true if the entire range [@start, @last] is unmapped.
+ * The memory lock must be held, as the caller will want to ensure
+ * the result stays true until a new mapping can be installed.
+ */
+bool page_check_range_empty(target_ulong start, target_ulong last);
+
/**
* page_get_target_data(address)
* @address: guest virtual address
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index 8fbcbf9771..25c605dc1b 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -598,6 +598,13 @@ int page_check_range(target_ulong start, target_ulong len, int flags)
return ret;
}
+bool page_check_range_empty(target_ulong start, target_ulong last)
+{
+ assert(last >= start);
+ assert_memory_lock();
+ return pageflags_find(start, last) == NULL;
+}
+
void page_protect(tb_page_addr_t address)
{
PageFlagsNode *p;
--
2.34.1
* Re: [PATCH 07/24] accel/tcg: Introduce page_check_range_empty
2023-06-30 13:21 ` [PATCH 07/24] accel/tcg: Introduce page_check_range_empty Richard Henderson
@ 2023-07-03 9:45 ` Alex Bennée
2023-07-03 10:33 ` Richard Henderson
0 siblings, 1 reply; 43+ messages in thread
From: Alex Bennée @ 2023-07-03 9:45 UTC (permalink / raw)
To: Richard Henderson; +Cc: mjt, laurent, qemu-devel
Richard Henderson <richard.henderson@linaro.org> writes:
> Examine the interval tree to validate that a region
> has no existing mappings.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
> include/exec/cpu-all.h | 11 +++++++++++
> accel/tcg/user-exec.c | 7 +++++++
> 2 files changed, 18 insertions(+)
>
> diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
> index 8018ce783e..5b2c230d52 100644
> --- a/include/exec/cpu-all.h
> +++ b/include/exec/cpu-all.h
> @@ -224,6 +224,17 @@ void page_set_flags(target_ulong start, target_ulong last, int flags);
> void page_reset_target_data(target_ulong start, target_ulong last);
> int page_check_range(target_ulong start, target_ulong len, int flags);
>
> +/**
> + * page_check_range_empty:
> + * @start: first byte of range
> + * @last: last byte of range
* Context: holding mmap lock
Otherwise:
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
--
Alex Bennée
Virtualisation Tech Lead @ Linaro
* Re: [PATCH 07/24] accel/tcg: Introduce page_check_range_empty
2023-07-03 9:45 ` Alex Bennée
@ 2023-07-03 10:33 ` Richard Henderson
2023-07-03 11:36 ` Alex Bennée
0 siblings, 1 reply; 43+ messages in thread
From: Richard Henderson @ 2023-07-03 10:33 UTC (permalink / raw)
To: Alex Bennée; +Cc: mjt, laurent, qemu-devel
On 7/3/23 11:45, Alex Bennée wrote:
>
> Richard Henderson <richard.henderson@linaro.org> writes:
>
>> Examine the interval tree to validate that a region
>> has no existing mappings.
>>
>> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
>> ---
>> include/exec/cpu-all.h | 11 +++++++++++
>> accel/tcg/user-exec.c | 7 +++++++
>> 2 files changed, 18 insertions(+)
>>
>> diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
>> index 8018ce783e..5b2c230d52 100644
>> --- a/include/exec/cpu-all.h
>> +++ b/include/exec/cpu-all.h
>> @@ -224,6 +224,17 @@ void page_set_flags(target_ulong start, target_ulong last, int flags);
>> void page_reset_target_data(target_ulong start, target_ulong last);
>> int page_check_range(target_ulong start, target_ulong len, int flags);
>>
>> +/**
>> + * page_check_range_empty:
>> + * @start: first byte of range
>> + * @last: last byte of range
>
> * Context: holding mmap lock
Is this parsable magic? Two lines below I do say "memory lock must be held"...
r~
>
> Otherwise:
>
> Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
>
>
* Re: [PATCH 07/24] accel/tcg: Introduce page_check_range_empty
2023-07-03 10:33 ` Richard Henderson
@ 2023-07-03 11:36 ` Alex Bennée
0 siblings, 0 replies; 43+ messages in thread
From: Alex Bennée @ 2023-07-03 11:36 UTC (permalink / raw)
To: Richard Henderson; +Cc: mjt, laurent, qemu-devel
Richard Henderson <richard.henderson@linaro.org> writes:
> On 7/3/23 11:45, Alex Bennée wrote:
>> Richard Henderson <richard.henderson@linaro.org> writes:
>>
>>> Examine the interval tree to validate that a region
>>> has no existing mappings.
>>>
>>> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
>>> ---
>>> include/exec/cpu-all.h | 11 +++++++++++
>>> accel/tcg/user-exec.c | 7 +++++++
>>> 2 files changed, 18 insertions(+)
>>>
>>> diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
>>> index 8018ce783e..5b2c230d52 100644
>>> --- a/include/exec/cpu-all.h
>>> +++ b/include/exec/cpu-all.h
>>> @@ -224,6 +224,17 @@ void page_set_flags(target_ulong start, target_ulong last, int flags);
>>> void page_reset_target_data(target_ulong start, target_ulong last);
>>> int page_check_range(target_ulong start, target_ulong len, int flags);
>>> +/**
>>> + * page_check_range_empty:
>>> + * @start: first byte of range
>>> + * @last: last byte of range
>> * Context: holding mmap lock
>
> Is this parsable magic? Two lines below I do say "memory lock must be
> held"...
Yeah - the kerneldoc describes it as:
* Context: Describes whether the function can sleep, what locks it takes,
* releases, or expects to be held. It can extend over multiple
* lines.
We use it in a couple of places in the existing code although mostly
referring to the BQL.
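Applied here, the comment would read something like:

    /**
     * page_check_range_empty:
     * @start: first byte of range
     * @last: last byte of range
     * Context: mmap lock must be held, as the caller will want to ensure
     *          the result stays true until a new mapping can be installed.
     *
     * Return true if the entire range [@start, @last] is unmapped.
     */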
>
>
> r~
>
>> Otherwise:
>> Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
>>
--
Alex Bennée
Virtualisation Tech Lead @ Linaro
* [PATCH 08/24] bsd-user: Use page_check_range_empty for MAP_EXCL
2023-06-30 13:21 [PATCH for-8.1? 00/24] linux-user: mmap range fixes Richard Henderson
` (6 preceding siblings ...)
2023-06-30 13:21 ` [PATCH 07/24] accel/tcg: Introduce page_check_range_empty Richard Henderson
@ 2023-06-30 13:21 ` Richard Henderson
2023-06-30 18:46 ` Warner Losh
2023-06-30 13:21 ` [PATCH 09/24] linux-user: Implement MAP_FIXED_NOREPLACE Richard Henderson
` (15 subsequent siblings)
23 siblings, 1 reply; 43+ messages in thread
From: Richard Henderson @ 2023-06-30 13:21 UTC (permalink / raw)
To: qemu-devel; +Cc: mjt, laurent, Warner Losh, Kyle Evans
The previous check returned -1 when any page within
[start, start+len) is unmapped, not when all are unmapped.
Cc: Warner Losh <imp@bsdimp.com>
Cc: Kyle Evans <kevans@freebsd.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
bsd-user/mmap.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/bsd-user/mmap.c b/bsd-user/mmap.c
index 565b9f97ed..07b5b8055e 100644
--- a/bsd-user/mmap.c
+++ b/bsd-user/mmap.c
@@ -609,7 +609,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
}
/* Reject the mapping if any page within the range is mapped */
- if ((flags & MAP_EXCL) && page_check_range(start, len, 0) < 0) {
+ if ((flags & MAP_EXCL) && !page_check_range_empty(start, end - 1)) {
errno = EINVAL;
goto fail;
}
--
2.34.1
* Re: [PATCH 08/24] bsd-user: Use page_check_range_empty for MAP_EXCL
2023-06-30 13:21 ` [PATCH 08/24] bsd-user: Use page_check_range_empty for MAP_EXCL Richard Henderson
@ 2023-06-30 18:46 ` Warner Losh
2023-06-30 19:40 ` Richard Henderson
0 siblings, 1 reply; 43+ messages in thread
From: Warner Losh @ 2023-06-30 18:46 UTC (permalink / raw)
To: Richard Henderson; +Cc: qemu-devel, mjt, laurent, Kyle Evans
On Fri, Jun 30, 2023 at 7:22 AM Richard Henderson <richard.henderson@linaro.org> wrote:
> The previous check returned -1 when any page within
> [start, start+len) is unmapped, not when all are unmapped.
>
> Cc: Warner Losh <imp@bsdimp.com>
> Cc: Kyle Evans <kevans@freebsd.org>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
> bsd-user/mmap.c | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/bsd-user/mmap.c b/bsd-user/mmap.c
> index 565b9f97ed..07b5b8055e 100644
> --- a/bsd-user/mmap.c
> +++ b/bsd-user/mmap.c
> @@ -609,7 +609,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
> }
>
> /* Reject the mapping if any page within the range is mapped */
> - if ((flags & MAP_EXCL) && page_check_range(start, len, 0) < 0) {
> + if ((flags & MAP_EXCL) && !page_check_range_empty(start, end - 1)) {
>
Won't computing end overflow if start is 1<<32 - len? Then subtracting one
here would be too late.
Warner
> errno = EINVAL;
> goto fail;
> }
> --
> 2.34.1
>
>
* Re: [PATCH 08/24] bsd-user: Use page_check_range_empty for MAP_EXCL
2023-06-30 18:46 ` Warner Losh
@ 2023-06-30 19:40 ` Richard Henderson
0 siblings, 0 replies; 43+ messages in thread
From: Richard Henderson @ 2023-06-30 19:40 UTC (permalink / raw)
To: Warner Losh; +Cc: qemu-devel, mjt, laurent, Kyle Evans
On 6/30/23 20:46, Warner Losh wrote:
>
>
> On Fri, Jun 30, 2023 at 7:22 AM Richard Henderson <richard.henderson@linaro.org> wrote:
>
> The previous check returned -1 when any page within
> [start, start+len) is unmapped, not when all are unmapped.
>
> Cc: Warner Losh <imp@bsdimp.com>
> Cc: Kyle Evans <kevans@freebsd.org>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
> bsd-user/mmap.c | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/bsd-user/mmap.c b/bsd-user/mmap.c
> index 565b9f97ed..07b5b8055e 100644
> --- a/bsd-user/mmap.c
> +++ b/bsd-user/mmap.c
> @@ -609,7 +609,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
> }
>
> /* Reject the mapping if any page within the range is mapped */
> - if ((flags & MAP_EXCL) && page_check_range(start, len, 0) < 0) {
> + if ((flags & MAP_EXCL) && !page_check_range_empty(start, end - 1)) {
>
>
> Won't computing end overflow if start is 1<<32 - len? Then subtracting one here would be
> too late.
-fwrapv, so fully two's complement.
So start per above, end == 0, end - 1 == UINT32_MAX.
See also the rest of the linux-user code, wherein I work with "last == end - 1".
Which has the advantage of preserving comparison order: start < last.
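A worked example with a 32-bit abi_ulong (values illustrative):

    abi_ulong start = 0xfffff000;   /* (1 << 32) - len */
    abi_ulong len   = 0x00001000;
    abi_ulong end   = start + len;  /* wraps to 0 */
    abi_ulong last  = end - 1;      /* 0xffffffff */
    /* start < last still holds, so [start, last] stays a well-ordered range,
       unlike [start, end) once end has wrapped below start. */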
r~
* [PATCH 09/24] linux-user: Implement MAP_FIXED_NOREPLACE
2023-06-30 13:21 [PATCH for-8.1? 00/24] linux-user: mmap range fixes Richard Henderson
` (7 preceding siblings ...)
2023-06-30 13:21 ` [PATCH 08/24] bsd-user: Use page_check_range_empty for MAP_EXCL Richard Henderson
@ 2023-06-30 13:21 ` Richard Henderson
2023-07-03 9:51 ` Alex Bennée
2023-06-30 13:21 ` [PATCH 10/24] linux-user: Split out target_to_host_prot Richard Henderson
` (14 subsequent siblings)
23 siblings, 1 reply; 43+ messages in thread
From: Richard Henderson @ 2023-06-30 13:21 UTC (permalink / raw)
To: qemu-devel; +Cc: mjt, laurent
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
linux-user/mmap.c | 14 +++++++++++---
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index cb5369d2d1..41c2f09fd5 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -502,7 +502,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
* If the user is asking for the kernel to find a location, do that
* before we truncate the length for mapping files below.
*/
- if (!(flags & MAP_FIXED)) {
+ if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
host_len = len + offset - host_offset;
host_len = HOST_PAGE_ALIGN(host_len);
start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
@@ -544,7 +544,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
}
}
- if (!(flags & MAP_FIXED)) {
+ if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
unsigned long host_start;
void *p;
@@ -593,6 +593,13 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
goto fail;
}
+ /* Validate that the chosen range is empty. */
+ if ((flags & MAP_FIXED_NOREPLACE)
+ && !page_check_range_empty(start, end - 1)) {
+ errno = EEXIST;
+ goto fail;
+ }
+
/*
* worst case: we cannot map the file because the offset is not
* aligned, so we read it
@@ -608,7 +615,8 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
goto fail;
}
retaddr = target_mmap(start, len, target_prot | PROT_WRITE,
- MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
+ (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))
+ | MAP_PRIVATE | MAP_ANONYMOUS,
-1, 0);
if (retaddr == -1) {
goto fail;
--
2.34.1
* Re: [PATCH 09/24] linux-user: Implement MAP_FIXED_NOREPLACE
2023-06-30 13:21 ` [PATCH 09/24] linux-user: Implement MAP_FIXED_NOREPLACE Richard Henderson
@ 2023-07-03 9:51 ` Alex Bennée
2023-07-03 10:39 ` Richard Henderson
0 siblings, 1 reply; 43+ messages in thread
From: Alex Bennée @ 2023-07-03 9:51 UTC (permalink / raw)
To: Richard Henderson; +Cc: mjt, laurent, qemu-devel
Richard Henderson <richard.henderson@linaro.org> writes:
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
> linux-user/mmap.c | 14 +++++++++++---
> 1 file changed, 11 insertions(+), 3 deletions(-)
>
> diff --git a/linux-user/mmap.c b/linux-user/mmap.c
> index cb5369d2d1..41c2f09fd5 100644
> --- a/linux-user/mmap.c
> +++ b/linux-user/mmap.c
> @@ -502,7 +502,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
> * If the user is asking for the kernel to find a location, do that
> * before we truncate the length for mapping files below.
> */
> - if (!(flags & MAP_FIXED)) {
> + if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
> host_len = len + offset - host_offset;
> host_len = HOST_PAGE_ALIGN(host_len);
> start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
> @@ -544,7 +544,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
> }
> }
>
> - if (!(flags & MAP_FIXED)) {
> + if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
> unsigned long host_start;
> void *p;
>
> @@ -593,6 +593,13 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
> goto fail;
> }
>
> + /* Validate that the chosen range is empty. */
> + if ((flags & MAP_FIXED_NOREPLACE)
> + && !page_check_range_empty(start, end - 1)) {
> + errno = EEXIST;
> + goto fail;
> + }
> +
> /*
> * worst case: we cannot map the file because the offset is not
> * aligned, so we read it
> @@ -608,7 +615,8 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
> goto fail;
> }
> retaddr = target_mmap(start, len, target_prot | PROT_WRITE,
> - MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
> + (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))
> + | MAP_PRIVATE | MAP_ANONYMOUS,
I thought we always used MAP_FIXED because we've already jumped through the
hoops to work out where in the memory space this allocation is going. Now if
the guest doesn't specify MAP_FIXED the kernel might decide to put the memory
somewhere else, potentially out of reach of the guest addressing?
IOW I thought the MAP_FIXED here was an internal QEMU implementation
detail rather than reflecting the guest's wishes.
> -1, 0);
> if (retaddr == -1) {
> goto fail;
--
Alex Bennée
Virtualisation Tech Lead @ Linaro
^ permalink raw reply [flat|nested] 43+ messages in thread
* Re: [PATCH 09/24] linux-user: Implement MAP_FIXED_NOREPLACE
2023-07-03 9:51 ` Alex Bennée
@ 2023-07-03 10:39 ` Richard Henderson
0 siblings, 0 replies; 43+ messages in thread
From: Richard Henderson @ 2023-07-03 10:39 UTC (permalink / raw)
To: Alex Bennée; +Cc: mjt, laurent, qemu-devel
On 7/3/23 11:51, Alex Bennée wrote:
>> @@ -544,7 +544,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
>> }
>> }
>>
>> - if (!(flags & MAP_FIXED)) {
>> + if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
>> unsigned long host_start;
>> void *p;
>>
...
>> @@ -608,7 +615,8 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
>> goto fail;
>> }
>> retaddr = target_mmap(start, len, target_prot | PROT_WRITE,
>> - MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
>> + (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))
>> + | MAP_PRIVATE | MAP_ANONYMOUS,
>
> I thought we always used MAP_FIXED because we've already jumped through the
> hoops to work out where in the memory space this allocation is going.
We have not, no. Not here, anyway.
> IOW I thought the MAP_FIXED here was an internal QEMU implementation
> detail rather than reflecting the guest's wishes.
As I read it, MAP_FIXED was here because we'd already checked for it in the if
statement that I retained above. But now we're checking for two flags, and must
pass both on to the recursion.
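For context, the guest-visible semantics being wired up here match the host
flag. A small host-side sketch of those semantics, assuming Linux 4.17+ with
matching headers (older kernels may silently ignore the flag):

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4096;

    /* Let the kernel pick an address, then try to claim it again. */
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    /* MAP_FIXED_NOREPLACE refuses to clobber the existing mapping. */
    void *q = mmap(p, len, PROT_READ,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
    if (q == MAP_FAILED && errno == EEXIST) {
        printf("second mapping rejected: %s\n", strerror(errno));
    }

    /* Plain MAP_FIXED would have silently replaced the old pages. */
    munmap(p, len);
    return 0;
}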
r~
^ permalink raw reply [flat|nested] 43+ messages in thread
* [PATCH 10/24] linux-user: Split out target_to_host_prot
2023-06-30 13:21 [PATCH for-8.1? 00/24] linux-user: mmap range fixes Richard Henderson
` (8 preceding siblings ...)
2023-06-30 13:21 ` [PATCH 09/24] linux-user: Implement MAP_FIXED_NOREPLACE Richard Henderson
@ 2023-06-30 13:21 ` Richard Henderson
2023-07-03 10:12 ` Alex Bennée
2023-06-30 13:21 ` [PATCH 11/24] linux-user: Widen target_mmap offset argument to off_t Richard Henderson
` (13 subsequent siblings)
23 siblings, 1 reply; 43+ messages in thread
From: Richard Henderson @ 2023-06-30 13:21 UTC (permalink / raw)
To: qemu-devel; +Cc: mjt, laurent
Split out from validate_prot_to_pageflags, as there is not
one single host_prot for the entire range. We need to adjust
prot for every host page that overlaps multiple guest pages.
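Roughly, the idea looks like this (a sketch with illustrative names; the real
code ORs in page_get_flags() for the neighbouring guest pages rather than
taking an array):

#include <stdio.h>
#include <sys/mman.h>

/* Same mapping the patch introduces: guest exec only needs host read. */
static int target_to_host_prot(int prot)
{
    return (prot & (PROT_READ | PROT_WRITE)) |
           (prot & PROT_EXEC ? PROT_READ : 0);
}

/*
 * OR together the guest prot of every guest page sharing one host page,
 * then translate once for the single host mprotect() call.
 */
static int host_page_prot(const int *guest_prot, int guest_pages)
{
    int sum = 0;

    for (int i = 0; i < guest_pages; i++) {
        sum |= guest_prot[i];
    }
    return target_to_host_prot(sum);
}

int main(void)
{
    /* Four guest pages in one host page: rx, r, rw, unmapped. */
    int guest[4] = { PROT_READ | PROT_EXEC, PROT_READ,
                     PROT_READ | PROT_WRITE, 0 };

    printf("host prot = %#x\n", host_page_prot(guest, 4));  /* read|write */
    return 0;
}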
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
linux-user/mmap.c | 77 +++++++++++++++++++++++++++--------------------
1 file changed, 44 insertions(+), 33 deletions(-)
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index 41c2f09fd5..26d409cb60 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -68,24 +68,11 @@ void mmap_fork_end(int child)
* Return 0 if the target prot bitmask is invalid, otherwise
* the internal qemu page_flags (which will include PAGE_VALID).
*/
-static int validate_prot_to_pageflags(int *host_prot, int prot)
+static int validate_prot_to_pageflags(int prot)
{
int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
int page_flags = (prot & PAGE_BITS) | PAGE_VALID;
- /*
- * For the host, we need not pass anything except read/write/exec.
- * While PROT_SEM is allowed by all hosts, it is also ignored, so
- * don't bother transforming guest bit to host bit. Any other
- * target-specific prot bits will not be understood by the host
- * and will need to be encoded into page_flags for qemu emulation.
- *
- * Pages that are executable by the guest will never be executed
- * by the host, but the host will need to be able to read them.
- */
- *host_prot = (prot & (PROT_READ | PROT_WRITE))
- | (prot & PROT_EXEC ? PROT_READ : 0);
-
#ifdef TARGET_AARCH64
{
ARMCPU *cpu = ARM_CPU(thread_cpu);
@@ -113,18 +100,34 @@ static int validate_prot_to_pageflags(int *host_prot, int prot)
return prot & ~valid ? 0 : page_flags;
}
+/*
+ * For the host, we need not pass anything except read/write/exec.
+ * While PROT_SEM is allowed by all hosts, it is also ignored, so
+ * don't bother transforming guest bit to host bit. Any other
+ * target-specific prot bits will not be understood by the host
+ * and will need to be encoded into page_flags for qemu emulation.
+ *
+ * Pages that are executable by the guest will never be executed
+ * by the host, but the host will need to be able to read them.
+ */
+static int target_to_host_prot(int prot)
+{
+ return (prot & (PROT_READ | PROT_WRITE)) |
+ (prot & PROT_EXEC ? PROT_READ : 0);
+}
+
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
abi_ulong end, host_start, host_end, addr;
- int prot1, ret, page_flags, host_prot;
+ int prot1, ret, page_flags;
trace_target_mprotect(start, len, target_prot);
if ((start & ~TARGET_PAGE_MASK) != 0) {
return -TARGET_EINVAL;
}
- page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
+ page_flags = validate_prot_to_pageflags(target_prot);
if (!page_flags) {
return -TARGET_EINVAL;
}
@@ -142,7 +145,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
host_end = HOST_PAGE_ALIGN(end);
if (start > host_start) {
/* handle host page containing start */
- prot1 = host_prot;
+ prot1 = target_prot;
for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
prot1 |= page_get_flags(addr);
}
@@ -153,19 +156,19 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
end = host_end;
}
ret = mprotect(g2h_untagged(host_start), qemu_host_page_size,
- prot1 & PAGE_BITS);
+ target_to_host_prot(prot1));
if (ret != 0) {
goto error;
}
host_start += qemu_host_page_size;
}
if (end < host_end) {
- prot1 = host_prot;
+ prot1 = target_prot;
for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
prot1 |= page_get_flags(addr);
}
ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
- qemu_host_page_size, prot1 & PAGE_BITS);
+ qemu_host_page_size, target_to_host_prot(prot1));
if (ret != 0) {
goto error;
}
@@ -174,8 +177,8 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
/* handle the pages in the middle */
if (host_start < host_end) {
- ret = mprotect(g2h_untagged(host_start),
- host_end - host_start, host_prot);
+ ret = mprotect(g2h_untagged(host_start), host_end - host_start,
+ target_to_host_prot(target_prot));
if (ret != 0) {
goto error;
}
@@ -211,7 +214,8 @@ static int mmap_frag(abi_ulong real_start,
if (prot1 == 0) {
/* no page was there, so we allocate one */
- void *p = mmap(host_start, qemu_host_page_size, prot,
+ void *p = mmap(host_start, qemu_host_page_size,
+ target_to_host_prot(prot),
flags | MAP_ANONYMOUS, -1, 0);
if (p == MAP_FAILED) {
return -1;
@@ -232,7 +236,8 @@ static int mmap_frag(abi_ulong real_start,
/* adjust protection to be able to read */
if (!(prot1 & PROT_WRITE)) {
- mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
+ mprotect(host_start, qemu_host_page_size,
+ target_to_host_prot(prot1) | PROT_WRITE);
}
/* read the corresponding file data */
@@ -242,11 +247,13 @@ static int mmap_frag(abi_ulong real_start,
/* put final protection */
if (prot_new != (prot1 | PROT_WRITE)) {
- mprotect(host_start, qemu_host_page_size, prot_new);
+ mprotect(host_start, qemu_host_page_size,
+ target_to_host_prot(prot_new));
}
} else {
if (prot_new != prot1) {
- mprotect(host_start, qemu_host_page_size, prot_new);
+ mprotect(host_start, qemu_host_page_size,
+ target_to_host_prot(prot_new));
}
if (prot_new & PROT_WRITE) {
memset(g2h_untagged(start), 0, end - start);
@@ -453,7 +460,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
{
abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len,
passthrough_start = -1, passthrough_end = -1;
- int page_flags, host_prot;
+ int page_flags;
mmap_lock();
trace_target_mmap(start, len, target_prot, flags, fd, offset);
@@ -463,7 +470,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
goto fail;
}
- page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
+ page_flags = validate_prot_to_pageflags(target_prot);
if (!page_flags) {
errno = EINVAL;
goto fail;
@@ -546,10 +553,12 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
unsigned long host_start;
+ int host_prot;
void *p;
host_len = len + offset - host_offset;
host_len = HOST_PAGE_ALIGN(host_len);
+ host_prot = target_to_host_prot(target_prot);
/*
* Note: we prefer to control the mapping address. It is
@@ -606,6 +615,8 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
*/
if (!(flags & MAP_ANONYMOUS) &&
(offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
+ int host_prot = target_to_host_prot(target_prot);
+
/*
* msync() won't work here, so we return an error if write is
* possible while it is a shared mapping
@@ -614,7 +625,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
errno = EINVAL;
goto fail;
}
- retaddr = target_mmap(start, len, target_prot | PROT_WRITE,
+ retaddr = target_mmap(start, len, host_prot | PROT_WRITE,
(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))
| MAP_PRIVATE | MAP_ANONYMOUS,
-1, 0);
@@ -636,14 +647,14 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
if (real_end == real_start + qemu_host_page_size) {
/* one single host page */
ret = mmap_frag(real_start, start, end,
- host_prot, flags, fd, offset);
+ target_prot, flags, fd, offset);
if (ret == -1) {
goto fail;
}
goto the_end1;
}
ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
- host_prot, flags, fd, offset);
+ target_prot, flags, fd, offset);
if (ret == -1) {
goto fail;
}
@@ -653,7 +664,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
if (end < real_end) {
ret = mmap_frag(real_end - qemu_host_page_size,
real_end - qemu_host_page_size, end,
- host_prot, flags, fd,
+ target_prot, flags, fd,
offset + real_end - qemu_host_page_size - start);
if (ret == -1) {
goto fail;
@@ -671,7 +682,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
offset1 = offset + real_start - start;
}
p = mmap(g2h_untagged(real_start), real_end - real_start,
- host_prot, flags, fd, offset1);
+ target_to_host_prot(target_prot), flags, fd, offset1);
if (p == MAP_FAILED) {
goto fail;
}
--
2.34.1
^ permalink raw reply related [flat|nested] 43+ messages in thread
* [PATCH 11/24] linux-user: Widen target_mmap offset argument to off_t
2023-06-30 13:21 [PATCH for-8.1? 00/24] linux-user: mmap range fixes Richard Henderson
` (9 preceding siblings ...)
2023-06-30 13:21 ` [PATCH 10/24] linux-user: Split out target_to_host_prot Richard Henderson
@ 2023-06-30 13:21 ` Richard Henderson
2023-07-03 10:47 ` Alex Bennée
2023-06-30 13:21 ` [PATCH 12/24] linux-user: Rewrite target_mprotect Richard Henderson
` (12 subsequent siblings)
23 siblings, 1 reply; 43+ messages in thread
From: Richard Henderson @ 2023-06-30 13:21 UTC (permalink / raw)
To: qemu-devel; +Cc: mjt, laurent
We build with _FILE_OFFSET_BITS=64, so off_t == off64_t, a 64-bit type.
With an extra cast, this fixes emulation of mmap2, where computing the
full value of the offset could previously overflow.
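A stand-alone illustration of the mmap2 overflow; abi_ulong and MMAP_SHIFT are
local stand-ins here, assuming the usual 12-bit shift, and _FILE_OFFSET_BITS=64
is what keeps off_t 64-bit even on 32-bit hosts:

#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>

typedef uint32_t abi_ulong;     /* 32-bit guest, local stand-in */
#define MMAP_SHIFT 12           /* mmap2 offsets are in 4 KiB units */

int main(void)
{
    abi_ulong arg6 = 0x00200000;    /* page offset: 8 GiB into the file */

    abi_ulong narrow = arg6 << MMAP_SHIFT;          /* wraps to 0 */
    off_t     wide   = (off_t)arg6 << MMAP_SHIFT;   /* 0x200000000 */

    printf("narrow = %#x\n", (unsigned)narrow);
    printf("wide   = %#llx\n", (unsigned long long)wide);
    return 0;
}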
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
linux-user/user-mmap.h | 2 +-
linux-user/mmap.c | 14 ++++++++------
linux-user/syscall.c | 2 +-
3 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/linux-user/user-mmap.h b/linux-user/user-mmap.h
index 480ce1c114..3fc986f92f 100644
--- a/linux-user/user-mmap.h
+++ b/linux-user/user-mmap.h
@@ -20,7 +20,7 @@
int target_mprotect(abi_ulong start, abi_ulong len, int prot);
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
- int flags, int fd, abi_ulong offset);
+ int flags, int fd, off_t offset);
int target_munmap(abi_ulong start, abi_ulong len);
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
abi_ulong new_size, unsigned long flags,
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index 26d409cb60..98be739b5b 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -195,7 +195,7 @@ error:
/* map an incomplete host page */
static int mmap_frag(abi_ulong real_start,
abi_ulong start, abi_ulong end,
- int prot, int flags, int fd, abi_ulong offset)
+ int prot, int flags, int fd, off_t offset)
{
abi_ulong real_end, addr;
void *host_start;
@@ -456,11 +456,12 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
- int flags, int fd, abi_ulong offset)
+ int flags, int fd, off_t offset)
{
- abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len,
+ abi_ulong ret, end, real_start, real_end, retaddr, host_len,
passthrough_start = -1, passthrough_end = -1;
int page_flags;
+ off_t host_offset;
mmap_lock();
trace_target_mmap(start, len, target_prot, flags, fd, offset);
@@ -552,7 +553,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
}
if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
- unsigned long host_start;
+ uintptr_t host_start;
int host_prot;
void *p;
@@ -571,7 +572,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
goto fail;
}
/* update start so that it points to the file position at 'offset' */
- host_start = (unsigned long)p;
+ host_start = (uintptr_t)p;
if (!(flags & MAP_ANONYMOUS)) {
p = mmap(g2h_untagged(start), len, host_prot,
flags | MAP_FIXED, fd, host_offset);
@@ -675,7 +676,8 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
/* map the middle (easier) */
if (real_start < real_end) {
void *p;
- unsigned long offset1;
+ off_t offset1;
+
if (flags & MAP_ANONYMOUS) {
offset1 = 0;
} else {
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index 459a2d8bc7..94cb4ad8fb 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -10350,7 +10350,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
#endif
ret = target_mmap(arg1, arg2, arg3,
target_to_host_bitmask(arg4, mmap_flags_tbl),
- arg5, arg6 << MMAP_SHIFT);
+ arg5, (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
return get_errno(ret);
#endif
case TARGET_NR_munmap:
--
2.34.1
^ permalink raw reply related [flat|nested] 43+ messages in thread
* [PATCH 12/24] linux-user: Rewrite target_mprotect
2023-06-30 13:21 [PATCH for-8.1? 00/24] linux-user: mmap range fixes Richard Henderson
` (10 preceding siblings ...)
2023-06-30 13:21 ` [PATCH 11/24] linux-user: Widen target_mmap offset argument to off_t Richard Henderson
@ 2023-06-30 13:21 ` Richard Henderson
2023-06-30 13:21 ` [PATCH 13/24] linux-user: Rewrite mmap_frag Richard Henderson
` (11 subsequent siblings)
23 siblings, 0 replies; 43+ messages in thread
From: Richard Henderson @ 2023-06-30 13:21 UTC (permalink / raw)
To: qemu-devel; +Cc: mjt, laurent
Use 'last' variables instead of 'end' variables.
When host page size > guest page size, detect when
adjacent host pages have the same protection and
merge that expanded host range into fewer syscalls.
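A rough sketch of just the range decomposition, with illustrative names; the
real patch additionally sums the neighbouring guest page flags to decide
whether the partial head/tail pieces need a separate mprotect call at all:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t addr_t;

/*
 * Split the guest range [start, last] into at most three host-page-aligned
 * pieces: a partial head page, a partial tail page, and the aligned middle.
 */
static void split_ranges(addr_t start, addr_t last, addr_t host_page)
{
    addr_t mask = host_page - 1;
    addr_t head = start & ~mask;      /* host page containing start */
    addr_t tail = last | mask;        /* last byte of host page holding last */

    if (tail - head < host_page) {
        printf("single: [%#" PRIx64 ", %#" PRIx64 "]\n", head, tail);
        return;
    }
    if (start & mask) {
        printf("head:   [%#" PRIx64 ", %#" PRIx64 "]\n", head, head + mask);
        head += host_page;
    }
    if ((last & mask) != mask) {
        printf("tail:   [%#" PRIx64 ", %#" PRIx64 "]\n", tail - mask, tail);
        tail -= host_page;
    }
    if (head < tail) {
        printf("middle: [%#" PRIx64 ", %#" PRIx64 "]\n", head, tail);
    }
}

int main(void)
{
    /* 4 KiB guest pages inside 16 KiB host pages, unaligned at both ends. */
    split_ranges(0x401000, 0x409fff, 0x4000);
    return 0;
}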
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
linux-user/mmap.c | 106 +++++++++++++++++++++++++++++-----------------
1 file changed, 67 insertions(+), 39 deletions(-)
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index 98be739b5b..c9bd72d3f9 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -119,8 +119,11 @@ static int target_to_host_prot(int prot)
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
- abi_ulong end, host_start, host_end, addr;
- int prot1, ret, page_flags;
+ abi_ulong starts[3];
+ abi_ulong lens[3];
+ int prots[3];
+ abi_ulong host_start, host_last, last;
+ int prot1, ret, page_flags, nranges;
trace_target_mprotect(start, len, target_prot);
@@ -131,63 +134,88 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
if (!page_flags) {
return -TARGET_EINVAL;
}
- len = TARGET_PAGE_ALIGN(len);
- end = start + len;
- if (!guest_range_valid_untagged(start, len)) {
- return -TARGET_ENOMEM;
- }
if (len == 0) {
return 0;
}
+ len = TARGET_PAGE_ALIGN(len);
+ if (!guest_range_valid_untagged(start, len)) {
+ return -TARGET_ENOMEM;
+ }
+
+ last = start + len - 1;
+ host_start = start & qemu_host_page_mask;
+ host_last = HOST_PAGE_ALIGN(last) - 1;
+ nranges = 0;
mmap_lock();
- host_start = start & qemu_host_page_mask;
- host_end = HOST_PAGE_ALIGN(end);
- if (start > host_start) {
- /* handle host page containing start */
+
+ if (host_last - host_start < qemu_host_page_size) {
+ /* Single host page contains all guest pages: sum the prot. */
prot1 = target_prot;
- for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
- prot1 |= page_get_flags(addr);
+ for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
+ prot1 |= page_get_flags(a);
}
- if (host_end == host_start + qemu_host_page_size) {
- for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
- prot1 |= page_get_flags(addr);
+ for (abi_ulong a = last; a < host_last; a += TARGET_PAGE_SIZE) {
+ prot1 |= page_get_flags(a + 1);
+ }
+ starts[nranges] = host_start;
+ lens[nranges] = qemu_host_page_size;
+ prots[nranges] = prot1;
+ nranges++;
+ } else {
+ if (host_start < start) {
+ /* Host page contains more than one guest page: sum the prot. */
+ prot1 = target_prot;
+ for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
+ prot1 |= page_get_flags(a);
+ }
+ /* If the resulting sum differs, create a new range. */
+ if (prot1 != target_prot) {
+ starts[nranges] = host_start;
+ lens[nranges] = qemu_host_page_size;
+ prots[nranges] = prot1;
+ nranges++;
+ host_start += qemu_host_page_size;
}
- end = host_end;
}
- ret = mprotect(g2h_untagged(host_start), qemu_host_page_size,
- target_to_host_prot(prot1));
- if (ret != 0) {
- goto error;
+
+ if (last < host_last) {
+ /* Host page contains more than one guest page: sum the prot. */
+ prot1 = target_prot;
+ for (abi_ulong a = last; a < host_last; a += TARGET_PAGE_SIZE) {
+ prot1 |= page_get_flags(a + 1);
+ }
+ /* If the resulting sum differs, create a new range. */
+ if (prot1 != target_prot) {
+ host_last -= qemu_host_page_size;
+ starts[nranges] = host_last + 1;
+ lens[nranges] = qemu_host_page_size;
+ prots[nranges] = prot1;
+ nranges++;
+ }
}
- host_start += qemu_host_page_size;
- }
- if (end < host_end) {
- prot1 = target_prot;
- for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
- prot1 |= page_get_flags(addr);
+
+ /* Create a range for the middle, if any remains. */
+ if (host_start < host_last) {
+ starts[nranges] = host_start;
+ lens[nranges] = host_last - host_start + 1;
+ prots[nranges] = target_prot;
+ nranges++;
}
- ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
- qemu_host_page_size, target_to_host_prot(prot1));
- if (ret != 0) {
- goto error;
- }
- host_end -= qemu_host_page_size;
}
- /* handle the pages in the middle */
- if (host_start < host_end) {
- ret = mprotect(g2h_untagged(host_start), host_end - host_start,
- target_to_host_prot(target_prot));
+ for (int i = 0; i < nranges; ++i) {
+ ret = mprotect(g2h_untagged(starts[i]), lens[i],
+ target_to_host_prot(prots[i]));
if (ret != 0) {
goto error;
}
}
- page_set_flags(start, start + len - 1, page_flags);
+ page_set_flags(start, last, page_flags);
ret = 0;
-error:
+ error:
mmap_unlock();
return ret;
}
--
2.34.1
^ permalink raw reply related [flat|nested] 43+ messages in thread
* [PATCH 13/24] linux-user: Rewrite mmap_frag
2023-06-30 13:21 [PATCH for-8.1? 00/24] linux-user: mmap range fixes Richard Henderson
` (11 preceding siblings ...)
2023-06-30 13:21 ` [PATCH 12/24] linux-user: Rewrite target_mprotect Richard Henderson
@ 2023-06-30 13:21 ` Richard Henderson
2023-06-30 13:21 ` [PATCH 14/24] accel/tcg: Introduce page_find_range_empty Richard Henderson
` (10 subsequent siblings)
23 siblings, 0 replies; 43+ messages in thread
From: Richard Henderson @ 2023-06-30 13:21 UTC (permalink / raw)
To: qemu-devel; +Cc: mjt, laurent
Use 'last' variables instead of 'end' variables.
Always zero MAP_ANONYMOUS fragments, which we previously
failed to do if they were not writable; early exit in case
we allocate a new page from the kernel, known zeros.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
linux-user/mmap.c | 123 +++++++++++++++++++++++-----------------------
1 file changed, 62 insertions(+), 61 deletions(-)
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index c9bd72d3f9..e332555dd2 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -221,73 +221,76 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
}
/* map an incomplete host page */
-static int mmap_frag(abi_ulong real_start,
- abi_ulong start, abi_ulong end,
- int prot, int flags, int fd, off_t offset)
+static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
+ int prot, int flags, int fd, off_t offset)
{
- abi_ulong real_end, addr;
+ abi_ulong real_last;
void *host_start;
- int prot1, prot_new;
+ int prot_old, prot_new;
+ int host_prot_old, host_prot_new;
- real_end = real_start + qemu_host_page_size;
- host_start = g2h_untagged(real_start);
-
- /* get the protection of the target pages outside the mapping */
- prot1 = 0;
- for (addr = real_start; addr < real_end; addr++) {
- if (addr < start || addr >= end) {
- prot1 |= page_get_flags(addr);
- }
+ if (!(flags & MAP_ANONYMOUS)
+ && (flags & MAP_TYPE) == MAP_SHARED
+ && (prot & PROT_WRITE)) {
+ /*
+ * msync() won't work with the partial page, so we return an
+ * error if write is possible while it is a shared mapping.
+ */
+ errno = EINVAL;
+ return false;
}
- if (prot1 == 0) {
- /* no page was there, so we allocate one */
+ real_last = real_start + qemu_host_page_size - 1;
+ host_start = g2h_untagged(real_start);
+
+ /* Get the protection of the target pages outside the mapping. */
+ prot_old = 0;
+ for (abi_ulong a = real_start; a < start; a += TARGET_PAGE_SIZE) {
+ prot_old |= page_get_flags(a);
+ }
+ for (abi_ulong a = real_last; a > last; a -= TARGET_PAGE_SIZE) {
+ prot_old |= page_get_flags(a);
+ }
+
+ if (prot_old == 0) {
+ /*
+ * Since !(prot_old & PAGE_VALID), there were no guest pages
+ * outside of the fragment we need to map. Allocate a new host
+ * page to cover, discarding whatever else may have been present.
+ */
void *p = mmap(host_start, qemu_host_page_size,
target_to_host_prot(prot),
flags | MAP_ANONYMOUS, -1, 0);
if (p == MAP_FAILED) {
- return -1;
+ return false;
}
- prot1 = prot;
+ prot_old = prot;
}
- prot1 &= PAGE_BITS;
+ prot_new = prot | prot_old;
- prot_new = prot | prot1;
- if (!(flags & MAP_ANONYMOUS)) {
- /*
- * msync() won't work here, so we return an error if write is
- * possible while it is a shared mapping.
- */
- if ((flags & MAP_TYPE) == MAP_SHARED && (prot & PROT_WRITE)) {
- return -1;
- }
+ host_prot_old = target_to_host_prot(prot_old);
+ host_prot_new = target_to_host_prot(prot_new);
- /* adjust protection to be able to read */
- if (!(prot1 & PROT_WRITE)) {
- mprotect(host_start, qemu_host_page_size,
- target_to_host_prot(prot1) | PROT_WRITE);
- }
+ /* Adjust protection to be able to write. */
+ if (!(host_prot_old & PROT_WRITE)) {
+ host_prot_old |= PROT_WRITE;
+ mprotect(host_start, qemu_host_page_size, host_prot_old);
+ }
- /* read the corresponding file data */
- if (pread(fd, g2h_untagged(start), end - start, offset) == -1) {
- return -1;
- }
-
- /* put final protection */
- if (prot_new != (prot1 | PROT_WRITE)) {
- mprotect(host_start, qemu_host_page_size,
- target_to_host_prot(prot_new));
- }
+ /* Read or zero the new guest pages. */
+ if (flags & MAP_ANONYMOUS) {
+ memset(g2h_untagged(start), 0, last - start + 1);
} else {
- if (prot_new != prot1) {
- mprotect(host_start, qemu_host_page_size,
- target_to_host_prot(prot_new));
- }
- if (prot_new & PROT_WRITE) {
- memset(g2h_untagged(start), 0, end - start);
+ if (pread(fd, g2h_untagged(start), last - start + 1, offset) == -1) {
+ return false;
}
}
- return 0;
+
+ /* Put final protection */
+ if (host_prot_new != host_prot_old) {
+ mprotect(host_start, qemu_host_page_size, host_prot_new);
+ }
+ return true;
}
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
@@ -675,27 +678,25 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
if (start > real_start) {
if (real_end == real_start + qemu_host_page_size) {
/* one single host page */
- ret = mmap_frag(real_start, start, end,
- target_prot, flags, fd, offset);
- if (ret == -1) {
+ if (!mmap_frag(real_start, start, end - 1,
+ target_prot, flags, fd, offset)) {
goto fail;
}
goto the_end1;
}
- ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
- target_prot, flags, fd, offset);
- if (ret == -1) {
+ if (!mmap_frag(real_start, start,
+ real_start + qemu_host_page_size - 1,
+ target_prot, flags, fd, offset)) {
goto fail;
}
real_start += qemu_host_page_size;
}
/* handle the end of the mapping */
if (end < real_end) {
- ret = mmap_frag(real_end - qemu_host_page_size,
- real_end - qemu_host_page_size, end,
- target_prot, flags, fd,
- offset + real_end - qemu_host_page_size - start);
- if (ret == -1) {
+ if (!mmap_frag(real_end - qemu_host_page_size,
+ real_end - qemu_host_page_size, end - 1,
+ target_prot, flags, fd,
+ offset + real_end - qemu_host_page_size - start)) {
goto fail;
}
real_end -= qemu_host_page_size;
--
2.34.1
^ permalink raw reply related [flat|nested] 43+ messages in thread
* [PATCH 14/24] accel/tcg: Introduce page_find_range_empty
2023-06-30 13:21 [PATCH for-8.1? 00/24] linux-user: mmap range fixes Richard Henderson
` (12 preceding siblings ...)
2023-06-30 13:21 ` [PATCH 13/24] linux-user: Rewrite mmap_frag Richard Henderson
@ 2023-06-30 13:21 ` Richard Henderson
2023-06-30 13:21 ` [PATCH 15/24] bsd-user: Use page_find_range_empty for mmap_find_vma_reserved Richard Henderson
` (9 subsequent siblings)
23 siblings, 0 replies; 43+ messages in thread
From: Richard Henderson @ 2023-06-30 13:21 UTC (permalink / raw)
To: qemu-devel; +Cc: mjt, laurent
Use the interval tree to locate an unused range in the VM.
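The same search, modelled stand-alone over a plain sorted array instead of the
interval tree; a toy, but it mirrors the align / probe / skip structure of the
loop in the patch:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t addr_t;

struct range { addr_t start, last; };   /* inclusive, like the tree nodes */

/* First occupied range overlapping [lo, hi], or NULL. */
static const struct range *find_overlap(const struct range *r, int n,
                                        addr_t lo, addr_t hi)
{
    for (int i = 0; i < n; i++) {
        if (r[i].last >= lo && r[i].start <= hi) {
            return &r[i];
        }
    }
    return NULL;
}

static addr_t find_empty(const struct range *r, int n, addr_t min, addr_t max,
                         addr_t len, addr_t align)
{
    addr_t len_m1 = len - 1, align_m1 = align - 1;

    while (1) {
        /* Align the candidate and re-check the remaining room. */
        min = (min + align_m1) & ~align_m1;
        if (min > max || len_m1 > max - min) {
            return (addr_t)-1;
        }
        const struct range *p = find_overlap(r, n, min, min + len_m1);
        if (p == NULL) {
            return min;                  /* hole found */
        }
        if (max <= p->last) {
            return (addr_t)-1;           /* occupied through to max */
        }
        min = p->last + 1;               /* skip past this allocation */
    }
}

int main(void)
{
    struct range used[] = { { 0x10000, 0x1ffff }, { 0x30000, 0x30fff } };
    addr_t hole = find_empty(used, 2, 0x1000, 0xffffffff, 0x10000, 0x10000);

    printf("hole at %#llx\n", (unsigned long long)hole);   /* 0x20000 */
    return 0;
}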
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/cpu-all.h | 15 +++++++++++++++
accel/tcg/user-exec.c | 41 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 56 insertions(+)
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 5b2c230d52..e2195c7e3d 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -235,6 +235,21 @@ int page_check_range(target_ulong start, target_ulong len, int flags);
*/
bool page_check_range_empty(target_ulong start, target_ulong last);
+/**
+ * page_find_range_empty
+ * @min: first byte of search range
+ * @max: last byte of search range
+ * @len: size of the hole required
+ * @align: alignment of the hole required (power of 2)
+ *
+ * If there is a range [x, x+@len) within [@min, @max] such that
+ * x % @align == 0, then return x. Otherwise return -1.
+ * The memory lock must be held, as the caller will want to ensure
+ * the returned range stays empty until a new mapping can be installed.
+ */
+target_ulong page_find_range_empty(target_ulong min, target_ulong max,
+ target_ulong len, target_ulong align);
+
/**
* page_get_target_data(address)
* @address: guest virtual address
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index 25c605dc1b..dee4931f86 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -605,6 +605,47 @@ bool page_check_range_empty(target_ulong start, target_ulong last)
return pageflags_find(start, last) == NULL;
}
+target_ulong page_find_range_empty(target_ulong min, target_ulong max,
+ target_ulong len, target_ulong align)
+{
+ target_ulong len_m1, align_m1;
+
+ assert(min <= max);
+ assert(max <= GUEST_ADDR_MAX);
+ assert(len != 0);
+ assert(is_power_of_2(align));
+ assert_memory_lock();
+
+ len_m1 = len - 1;
+ align_m1 = align - 1;
+
+ /* Iteratively narrow the search region. */
+ while (1) {
+ PageFlagsNode *p;
+
+ /* Align min and double-check there's enough space remaining. */
+ min = (min + align_m1) & ~align_m1;
+ if (min > max) {
+ return -1;
+ }
+ if (len_m1 > max - min) {
+ return -1;
+ }
+
+ p = pageflags_find(min, min + len_m1);
+ if (p == NULL) {
+ /* Found! */
+ return min;
+ }
+ if (max <= p->itree.last) {
+ /* Existing allocation fills the remainder of the search region. */
+ return -1;
+ }
+ /* Skip across existing allocation. */
+ min = p->itree.last + 1;
+ }
+}
+
void page_protect(tb_page_addr_t address)
{
PageFlagsNode *p;
--
2.34.1
^ permalink raw reply related [flat|nested] 43+ messages in thread
* [PATCH 15/24] bsd-user: Use page_find_range_empty for mmap_find_vma_reserved
2023-06-30 13:21 [PATCH for-8.1? 00/24] linux-user: mmap range fixes Richard Henderson
` (13 preceding siblings ...)
2023-06-30 13:21 ` [PATCH 14/24] accel/tcg: Introduce page_find_range_empty Richard Henderson
@ 2023-06-30 13:21 ` Richard Henderson
2023-06-30 13:21 ` [PATCH 16/24] linux-user: " Richard Henderson
` (8 subsequent siblings)
23 siblings, 0 replies; 43+ messages in thread
From: Richard Henderson @ 2023-06-30 13:21 UTC (permalink / raw)
To: qemu-devel; +Cc: mjt, laurent, Warner Losh, Kyle Evans
Use the interval tree to find empty space, rather than
probing each page in turn.
Cc: Warner Losh <imp@bsdimp.com>
Cc: Kyle Evans <kevans@freebsd.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
bsd-user/mmap.c | 47 ++++++-----------------------------------------
1 file changed, 6 insertions(+), 41 deletions(-)
diff --git a/bsd-user/mmap.c b/bsd-user/mmap.c
index 07b5b8055e..0d06bd457d 100644
--- a/bsd-user/mmap.c
+++ b/bsd-user/mmap.c
@@ -222,50 +222,15 @@ unsigned long last_brk;
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
abi_ulong alignment)
{
- abi_ulong addr;
- abi_ulong end_addr;
- int prot;
- int looped = 0;
+ abi_ulong ret;
- if (size > reserved_va) {
- return (abi_ulong)-1;
+ ret = page_find_range_empty(start, reserved_va, size, alignment);
+ if (ret == -1 && start > TARGET_PAGE_SIZE) {
+ /* Restart at the beginning of the address space. */
+ ret = page_find_range_empty(TARGET_PAGE_SIZE, start - 1, size, alignment);
}
- size = HOST_PAGE_ALIGN(size) + alignment;
- end_addr = start + size;
- if (end_addr > reserved_va) {
- end_addr = reserved_va + 1;
- }
- addr = end_addr - qemu_host_page_size;
-
- while (1) {
- if (addr > end_addr) {
- if (looped) {
- return (abi_ulong)-1;
- }
- end_addr = reserved_va + 1;
- addr = end_addr - qemu_host_page_size;
- looped = 1;
- continue;
- }
- prot = page_get_flags(addr);
- if (prot) {
- end_addr = addr;
- }
- if (end_addr - addr >= size) {
- break;
- }
- addr -= qemu_host_page_size;
- }
-
- if (start == mmap_next_start) {
- mmap_next_start = addr;
- }
- /* addr is sufficiently low to align it up */
- if (alignment != 0) {
- addr = (addr + alignment) & ~(alignment - 1);
- }
- return addr;
+ return ret;
}
/*
--
2.34.1
^ permalink raw reply related [flat|nested] 43+ messages in thread
* [PATCH 16/24] linux-user: Use page_find_range_empty for mmap_find_vma_reserved
2023-06-30 13:21 [PATCH for-8.1? 00/24] linux-user: mmap range fixes Richard Henderson
` (14 preceding siblings ...)
2023-06-30 13:21 ` [PATCH 15/24] bsd-user: Use page_find_range_empty for mmap_find_vma_reserved Richard Henderson
@ 2023-06-30 13:21 ` Richard Henderson
2023-06-30 13:21 ` [PATCH 17/24] linux-user: Use 'last' instead of 'end' in target_mmap Richard Henderson
` (7 subsequent siblings)
23 siblings, 0 replies; 43+ messages in thread
From: Richard Henderson @ 2023-06-30 13:21 UTC (permalink / raw)
To: qemu-devel; +Cc: mjt, laurent
Use the interval tree to find empty space, rather than
probing each page in turn.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
linux-user/mmap.c | 46 ++++++----------------------------------------
1 file changed, 6 insertions(+), 40 deletions(-)
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index e332555dd2..6ecdf9e56d 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -317,49 +317,15 @@ unsigned long last_brk;
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
abi_ulong align)
{
- abi_ulong addr, end_addr, incr = qemu_host_page_size;
- int prot;
- bool looped = false;
+ target_ulong ret;
- if (size > reserved_va) {
- return (abi_ulong)-1;
+ ret = page_find_range_empty(start, reserved_va, size, align);
+ if (ret == -1 && start > mmap_min_addr) {
+ /* Restart at the beginning of the address space. */
+ ret = page_find_range_empty(mmap_min_addr, start - 1, size, align);
}
- /* Note that start and size have already been aligned by mmap_find_vma. */
-
- end_addr = start + size;
- if (start > reserved_va - size) {
- /* Start at the top of the address space. */
- end_addr = ((reserved_va + 1 - size) & -align) + size;
- looped = true;
- }
-
- /* Search downward from END_ADDR, checking to see if a page is in use. */
- addr = end_addr;
- while (1) {
- addr -= incr;
- if (addr > end_addr) {
- if (looped) {
- /* Failure. The entire address space has been searched. */
- return (abi_ulong)-1;
- }
- /* Re-start at the top of the address space. */
- addr = end_addr = ((reserved_va + 1 - size) & -align) + size;
- looped = true;
- } else {
- prot = page_get_flags(addr);
- if (prot) {
- /* Page in use. Restart below this page. */
- addr = end_addr = ((addr - size) & -align) + size;
- } else if (addr && addr + size == end_addr) {
- /* Success! All pages between ADDR and END_ADDR are free. */
- if (start == mmap_next_start) {
- mmap_next_start = addr;
- }
- return addr;
- }
- }
- }
+ return ret;
}
/*
--
2.34.1
^ permalink raw reply related [flat|nested] 43+ messages in thread
* [PATCH 17/24] linux-user: Use 'last' instead of 'end' in target_mmap
2023-06-30 13:21 [PATCH for-8.1? 00/24] linux-user: mmap range fixes Richard Henderson
` (15 preceding siblings ...)
2023-06-30 13:21 ` [PATCH 16/24] linux-user: " Richard Henderson
@ 2023-06-30 13:21 ` Richard Henderson
2023-06-30 13:21 ` [PATCH 18/24] linux-user: Rewrite mmap_reserve Richard Henderson
` (6 subsequent siblings)
23 siblings, 0 replies; 43+ messages in thread
From: Richard Henderson @ 2023-06-30 13:21 UTC (permalink / raw)
To: qemu-devel; +Cc: mjt, laurent
Complete the transition within the mmap functions to a formulation
that does not overflow at the end of the address space.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
linux-user/mmap.c | 45 +++++++++++++++++++++++----------------------
1 file changed, 23 insertions(+), 22 deletions(-)
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index 6ecdf9e56d..67a117823f 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -455,8 +455,8 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
int flags, int fd, off_t offset)
{
- abi_ulong ret, end, real_start, real_end, retaddr, host_len,
- passthrough_start = -1, passthrough_end = -1;
+ abi_ulong ret, last, real_start, real_last, retaddr, host_len;
+ abi_ulong passthrough_start = -1, passthrough_last = 0;
int page_flags;
off_t host_offset;
@@ -580,29 +580,30 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
host_start += offset - host_offset;
}
start = h2g(host_start);
+ last = start + len - 1;
passthrough_start = start;
- passthrough_end = start + len;
+ passthrough_last = last;
} else {
if (start & ~TARGET_PAGE_MASK) {
errno = EINVAL;
goto fail;
}
- end = start + len;
- real_end = HOST_PAGE_ALIGN(end);
+ last = start + len - 1;
+ real_last = HOST_PAGE_ALIGN(last) - 1;
/*
* Test if requested memory area fits target address space
* It can fail only on 64-bit host with 32-bit target.
* On any other target/host host mmap() handles this error correctly.
*/
- if (end < start || !guest_range_valid_untagged(start, len)) {
+ if (last < start || !guest_range_valid_untagged(start, len)) {
errno = ENOMEM;
goto fail;
}
/* Validate that the chosen range is empty. */
if ((flags & MAP_FIXED_NOREPLACE)
- && !page_check_range_empty(start, end - 1)) {
+ && !page_check_range_empty(start, last)) {
errno = EEXIST;
goto fail;
}
@@ -642,9 +643,9 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
/* handle the start of the mapping */
if (start > real_start) {
- if (real_end == real_start + qemu_host_page_size) {
+ if (real_last == real_start + qemu_host_page_size - 1) {
/* one single host page */
- if (!mmap_frag(real_start, start, end - 1,
+ if (!mmap_frag(real_start, start, last,
target_prot, flags, fd, offset)) {
goto fail;
}
@@ -658,18 +659,18 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
real_start += qemu_host_page_size;
}
/* handle the end of the mapping */
- if (end < real_end) {
- if (!mmap_frag(real_end - qemu_host_page_size,
- real_end - qemu_host_page_size, end - 1,
+ if (last < real_last) {
+ abi_ulong real_page = real_last - qemu_host_page_size + 1;
+ if (!mmap_frag(real_page, real_page, last,
target_prot, flags, fd,
- offset + real_end - qemu_host_page_size - start)) {
+ offset + real_page - start)) {
goto fail;
}
- real_end -= qemu_host_page_size;
+ real_last -= qemu_host_page_size;
}
/* map the middle (easier) */
- if (real_start < real_end) {
+ if (real_start < real_last) {
void *p;
off_t offset1;
@@ -678,13 +679,13 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
} else {
offset1 = offset + real_start - start;
}
- p = mmap(g2h_untagged(real_start), real_end - real_start,
+ p = mmap(g2h_untagged(real_start), real_last - real_start + 1,
target_to_host_prot(target_prot), flags, fd, offset1);
if (p == MAP_FAILED) {
goto fail;
}
passthrough_start = real_start;
- passthrough_end = real_end;
+ passthrough_last = real_last;
}
}
the_end1:
@@ -692,16 +693,16 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
page_flags |= PAGE_ANON;
}
page_flags |= PAGE_RESET;
- if (passthrough_start == passthrough_end) {
- page_set_flags(start, start + len - 1, page_flags);
+ if (passthrough_start > passthrough_last) {
+ page_set_flags(start, last, page_flags);
} else {
if (start < passthrough_start) {
page_set_flags(start, passthrough_start - 1, page_flags);
}
- page_set_flags(passthrough_start, passthrough_end - 1,
+ page_set_flags(passthrough_start, passthrough_last,
page_flags | PAGE_PASSTHROUGH);
- if (passthrough_end < start + len) {
- page_set_flags(passthrough_end, start + len - 1, page_flags);
+ if (passthrough_last < last) {
+ page_set_flags(passthrough_last + 1, last, page_flags);
}
}
the_end:
--
2.34.1
^ permalink raw reply related [flat|nested] 43+ messages in thread
* [PATCH 18/24] linux-user: Rewrite mmap_reserve
2023-06-30 13:21 [PATCH for-8.1? 00/24] linux-user: mmap range fixes Richard Henderson
` (16 preceding siblings ...)
2023-06-30 13:21 ` [PATCH 17/24] linux-user: Use 'last' instead of 'end' in target_mmap Richard Henderson
@ 2023-06-30 13:21 ` Richard Henderson
2023-06-30 13:21 ` [PATCH 19/24] linux-user: Rename mmap_reserve to mmap_reserve_or_unmap Richard Henderson
` (5 subsequent siblings)
23 siblings, 0 replies; 43+ messages in thread
From: Richard Henderson @ 2023-06-30 13:21 UTC (permalink / raw)
To: qemu-devel; +Cc: mjt, laurent
Use 'last' variables instead of 'end' variables; be careful
about avoiding overflow. Assert that the mmap succeeded.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
linux-user/mmap.c | 68 +++++++++++++++++++++++++++++------------------
1 file changed, 42 insertions(+), 26 deletions(-)
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index 67a117823f..6b030dac42 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -722,47 +722,63 @@ fail:
return -1;
}
-static void mmap_reserve(abi_ulong start, abi_ulong size)
+static void mmap_reserve(abi_ulong start, abi_ulong len)
{
abi_ulong real_start;
- abi_ulong real_end;
- abi_ulong addr;
- abi_ulong end;
+ abi_ulong real_last;
+ abi_ulong real_len;
+ abi_ulong last;
+ abi_ulong a;
+ void *host_start, *ptr;
int prot;
+ last = start + len - 1;
real_start = start & qemu_host_page_mask;
- real_end = HOST_PAGE_ALIGN(start + size);
- end = start + size;
- if (start > real_start) {
- /* handle host page containing start */
+ real_last = HOST_PAGE_ALIGN(last) - 1;
+
+ /*
+ * If guest pages remain on the first or last host pages,
+ * adjust the deallocation to retain those guest pages.
+ * The single page special case is required for the last page,
+ * lest real_start overflow to zero.
+ */
+ if (real_last - real_start < qemu_host_page_size) {
prot = 0;
- for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
- prot |= page_get_flags(addr);
+ for (a = real_start; a < start; a += TARGET_PAGE_SIZE) {
+ prot |= page_get_flags(a);
}
- if (real_end == real_start + qemu_host_page_size) {
- for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
- prot |= page_get_flags(addr);
- }
- end = real_end;
+ for (a = last; a < real_last; a += TARGET_PAGE_SIZE) {
+ prot |= page_get_flags(a + 1);
+ }
+ if (prot != 0) {
+ return;
+ }
+ } else {
+ for (prot = 0, a = real_start; a < start; a += TARGET_PAGE_SIZE) {
+ prot |= page_get_flags(a);
}
if (prot != 0) {
real_start += qemu_host_page_size;
}
- }
- if (end < real_end) {
- prot = 0;
- for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
- prot |= page_get_flags(addr);
+
+ for (prot = 0, a = last; a < real_last; a += TARGET_PAGE_SIZE) {
+ prot |= page_get_flags(a + 1);
}
if (prot != 0) {
- real_end -= qemu_host_page_size;
+ real_last -= qemu_host_page_size;
+ }
+
+ if (real_last < real_start) {
+ return;
}
}
- if (real_start != real_end) {
- mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
- MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
- -1, 0);
- }
+
+ real_len = real_last - real_start + 1;
+ host_start = g2h_untagged(real_start);
+
+ ptr = mmap(host_start, real_len, PROT_NONE,
+ MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
+ assert(ptr == host_start);
}
int target_munmap(abi_ulong start, abi_ulong len)
--
2.34.1
^ permalink raw reply related [flat|nested] 43+ messages in thread
* [PATCH 19/24] linux-user: Rename mmap_reserve to mmap_reserve_or_unmap
2023-06-30 13:21 [PATCH for-8.1? 00/24] linux-user: mmap range fixes Richard Henderson
` (17 preceding siblings ...)
2023-06-30 13:21 ` [PATCH 18/24] linux-user: Rewrite mmap_reserve Richard Henderson
@ 2023-06-30 13:21 ` Richard Henderson
2023-06-30 13:21 ` [PATCH 20/24] linux-user: Simplify target_munmap Richard Henderson
` (4 subsequent siblings)
23 siblings, 0 replies; 43+ messages in thread
From: Richard Henderson @ 2023-06-30 13:21 UTC (permalink / raw)
To: qemu-devel; +Cc: mjt, laurent
If !reserved_va, munmap instead and assert success.
Update all callers.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
linux-user/mmap.c | 29 ++++++++++++++++-------------
1 file changed, 16 insertions(+), 13 deletions(-)
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index 6b030dac42..8c90a690dd 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -722,14 +722,14 @@ fail:
return -1;
}
-static void mmap_reserve(abi_ulong start, abi_ulong len)
+static void mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
{
abi_ulong real_start;
abi_ulong real_last;
abi_ulong real_len;
abi_ulong last;
abi_ulong a;
- void *host_start, *ptr;
+ void *host_start;
int prot;
last = start + len - 1;
@@ -776,9 +776,15 @@ static void mmap_reserve(abi_ulong start, abi_ulong len)
real_len = real_last - real_start + 1;
host_start = g2h_untagged(real_start);
- ptr = mmap(host_start, real_len, PROT_NONE,
- MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
- assert(ptr == host_start);
+ if (reserved_va) {
+ void *ptr = mmap(host_start, real_len, PROT_NONE,
+ MAP_FIXED | MAP_ANONYMOUS
+ | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
+ assert(ptr == host_start);
+ } else {
+ int ret = munmap(host_start, real_len);
+ assert(ret == 0);
+ }
}
int target_munmap(abi_ulong start, abi_ulong len)
@@ -830,11 +836,7 @@ int target_munmap(abi_ulong start, abi_ulong len)
ret = 0;
/* unmap what we can */
if (real_start < real_end) {
- if (reserved_va) {
- mmap_reserve(real_start, real_end - real_start);
- } else {
- ret = munmap(g2h_untagged(real_start), real_end - real_start);
- }
+ mmap_reserve_or_unmap(real_start, real_end - real_start);
}
if (ret == 0) {
@@ -871,7 +873,7 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
* If new and old addresses overlap then the above mremap will
* already have failed with EINVAL.
*/
- mmap_reserve(old_addr, old_size);
+ mmap_reserve_or_unmap(old_addr, old_size);
}
} else if (flags & MREMAP_MAYMOVE) {
abi_ulong mmap_start;
@@ -886,7 +888,7 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
flags | MREMAP_FIXED,
g2h_untagged(mmap_start));
if (reserved_va) {
- mmap_reserve(old_addr, old_size);
+ mmap_reserve_or_unmap(old_addr, old_size);
}
}
} else {
@@ -912,7 +914,8 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
errno = ENOMEM;
host_addr = MAP_FAILED;
} else if (reserved_va && old_size > new_size) {
- mmap_reserve(old_addr + old_size, old_size - new_size);
+ mmap_reserve_or_unmap(old_addr + old_size,
+ old_size - new_size);
}
}
} else {
--
2.34.1
^ permalink raw reply related [flat|nested] 43+ messages in thread
* [PATCH 20/24] linux-user: Simplify target_munmap
2023-06-30 13:21 [PATCH for-8.1? 00/24] linux-user: mmap range fixes Richard Henderson
` (18 preceding siblings ...)
2023-06-30 13:21 ` [PATCH 19/24] linux-user: Rename mmap_reserve to mmap_reserve_or_unmap Richard Henderson
@ 2023-06-30 13:21 ` Richard Henderson
2023-06-30 13:21 ` [PATCH 21/24] accel/tcg: Accept more page flags in page_check_range Richard Henderson
` (3 subsequent siblings)
23 siblings, 0 replies; 43+ messages in thread
From: Richard Henderson @ 2023-06-30 13:21 UTC (permalink / raw)
To: qemu-devel; +Cc: mjt, laurent
All of the guest to host page adjustment is handled by
mmap_reserve_or_unmap; there is no need to duplicate that.
There are no failure modes for munmap after alignment and
guest address range have been validated.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
linux-user/mmap.c | 47 ++++-------------------------------------------
1 file changed, 4 insertions(+), 43 deletions(-)
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index 8c90a690dd..e6463ecd8d 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -789,9 +789,6 @@ static void mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
int target_munmap(abi_ulong start, abi_ulong len)
{
- abi_ulong end, real_start, real_end, addr;
- int prot, ret;
-
trace_target_munmap(start, len);
if (start & ~TARGET_PAGE_MASK) {
@@ -803,47 +800,11 @@ int target_munmap(abi_ulong start, abi_ulong len)
}
mmap_lock();
- end = start + len;
- real_start = start & qemu_host_page_mask;
- real_end = HOST_PAGE_ALIGN(end);
-
- if (start > real_start) {
- /* handle host page containing start */
- prot = 0;
- for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
- prot |= page_get_flags(addr);
- }
- if (real_end == real_start + qemu_host_page_size) {
- for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
- prot |= page_get_flags(addr);
- }
- end = real_end;
- }
- if (prot != 0) {
- real_start += qemu_host_page_size;
- }
- }
- if (end < real_end) {
- prot = 0;
- for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
- prot |= page_get_flags(addr);
- }
- if (prot != 0) {
- real_end -= qemu_host_page_size;
- }
- }
-
- ret = 0;
- /* unmap what we can */
- if (real_start < real_end) {
- mmap_reserve_or_unmap(real_start, real_end - real_start);
- }
-
- if (ret == 0) {
- page_set_flags(start, start + len - 1, 0);
- }
+ mmap_reserve_or_unmap(start, len);
+ page_set_flags(start, start + len - 1, 0);
mmap_unlock();
- return ret;
+
+ return 0;
}
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
--
2.34.1
^ permalink raw reply related [flat|nested] 43+ messages in thread
* [PATCH 21/24] accel/tcg: Accept more page flags in page_check_range
2023-06-30 13:21 [PATCH for-8.1? 00/24] linux-user: mmap range fixes Richard Henderson
` (19 preceding siblings ...)
2023-06-30 13:21 ` [PATCH 20/24] linux-user: Simplify target_munmap Richard Henderson
@ 2023-06-30 13:21 ` Richard Henderson
2023-06-30 13:21 ` [PATCH 22/24] accel/tcg: Return bool from page_check_range Richard Henderson
` (2 subsequent siblings)
23 siblings, 0 replies; 43+ messages in thread
From: Richard Henderson @ 2023-06-30 13:21 UTC (permalink / raw)
To: qemu-devel; +Cc: mjt, laurent
Only PAGE_WRITE needs special attention; all others can be
handled just as we handle PAGE_READ. Adjust the mask accordingly.
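In sketch form, with local stand-ins for the flag values in cpu-all.h:

#include <stdbool.h>

enum {
    PAGE_READ  = 1 << 0,    /* local stand-ins, not the real definitions */
    PAGE_WRITE = 1 << 1,
    PAGE_EXEC  = 1 << 2,
};

/*
 * Everything except PAGE_WRITE must already be present; a missing
 * PAGE_WRITE may still be satisfiable (a write-protected page that the
 * real code unprotects via PAGE_WRITE_ORG / page_unprotect()).
 */
static bool flags_ok(int present, int required, bool can_unprotect)
{
    int missing = required & ~present;

    if (missing & ~PAGE_WRITE) {
        return false;               /* read/exec/etc. genuinely absent */
    }
    if (missing & PAGE_WRITE) {
        return can_unprotect;
    }
    return true;
}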
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/user-exec.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index dee4931f86..7a36f95bbc 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -561,8 +561,8 @@ int page_check_range(target_ulong start, target_ulong len, int flags)
}
missing = flags & ~p->flags;
- if (missing & PAGE_READ) {
- ret = -1; /* page not readable */
+ if (missing & ~PAGE_WRITE) {
+ ret = -1; /* page doesn't match */
break;
}
if (missing & PAGE_WRITE) {
--
2.34.1
^ permalink raw reply related [flat|nested] 43+ messages in thread
* [PATCH 22/24] accel/tcg: Return bool from page_check_range
2023-06-30 13:21 [PATCH for-8.1? 00/24] linux-user: mmap range fixes Richard Henderson
` (20 preceding siblings ...)
2023-06-30 13:21 ` [PATCH 21/24] accel/tcg: Accept more page flags in page_check_range Richard Henderson
@ 2023-06-30 13:21 ` Richard Henderson
2023-06-30 13:21 ` [PATCH 23/24] linux-user: Remove can_passthrough_madvise Richard Henderson
2023-06-30 13:21 ` [PATCH 24/24] linux-user: Simplify target_madvise Richard Henderson
23 siblings, 0 replies; 43+ messages in thread
From: Richard Henderson @ 2023-06-30 13:21 UTC (permalink / raw)
To: qemu-devel; +Cc: mjt, laurent
Replace the 0/-1 result with true/false.
Invert the sense of the test of all callers.
Document the function.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
bsd-user/qemu.h | 2 +-
include/exec/cpu-all.h | 13 ++++++++++++-
linux-user/qemu.h | 2 +-
accel/tcg/user-exec.c | 22 +++++++++++-----------
linux-user/syscall.c | 2 +-
target/hppa/op_helper.c | 2 +-
target/riscv/vector_helper.c | 2 +-
target/sparc/ldst_helper.c | 2 +-
accel/tcg/ldst_atomicity.c.inc | 4 ++--
9 files changed, 31 insertions(+), 20 deletions(-)
diff --git a/bsd-user/qemu.h b/bsd-user/qemu.h
index 41d84e0b81..edf9602f9b 100644
--- a/bsd-user/qemu.h
+++ b/bsd-user/qemu.h
@@ -267,7 +267,7 @@ abi_long do_freebsd_sysarch(void *cpu_env, abi_long arg1, abi_long arg2);
static inline bool access_ok(int type, abi_ulong addr, abi_ulong size)
{
- return page_check_range((target_ulong)addr, size, type) == 0;
+ return page_check_range((target_ulong)addr, size, type);
}
/*
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index e2195c7e3d..30ce44fd90 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -222,7 +222,18 @@ int walk_memory_regions(void *, walk_memory_regions_fn);
int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong last, int flags);
void page_reset_target_data(target_ulong start, target_ulong last);
-int page_check_range(target_ulong start, target_ulong len, int flags);
+
+/**
+ * page_check_range
+ * @start: first byte of range
+ * @len: length of range
+ * @flags: flags required for each page
+ *
+ * Return true if every page in [@start, @start+@len) has @flags set.
+ * Return false if any page is unmapped. Thus testing flags == 0 is
+ * equivalent to testing for flags == PAGE_VALID.
+ */
+bool page_check_range(target_ulong start, target_ulong len, int flags);
/**
* page_check_range_empty:
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
index 92f9f5af41..a6e8f0f9f6 100644
--- a/linux-user/qemu.h
+++ b/linux-user/qemu.h
@@ -179,7 +179,7 @@ static inline bool access_ok_untagged(int type, abi_ulong addr, abi_ulong size)
: !guest_range_valid_untagged(addr, size)) {
return false;
}
- return page_check_range((target_ulong)addr, size, type) == 0;
+ return page_check_range((target_ulong)addr, size, type);
}
static inline bool access_ok(CPUState *cpu, int type,
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index 7a36f95bbc..107a8cda61 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -520,19 +520,19 @@ void page_set_flags(target_ulong start, target_ulong last, int flags)
}
}
-int page_check_range(target_ulong start, target_ulong len, int flags)
+bool page_check_range(target_ulong start, target_ulong len, int flags)
{
target_ulong last;
int locked; /* tri-state: =0: unlocked, +1: global, -1: local */
- int ret;
+ bool ret;
if (len == 0) {
- return 0; /* trivial length */
+ return true; /* trivial length */
}
last = start + len - 1;
if (last < start) {
- return -1; /* wrap around */
+ return false; /* wrap around */
}
locked = have_mmap_lock();
@@ -551,33 +551,33 @@ int page_check_range(target_ulong start, target_ulong len, int flags)
p = pageflags_find(start, last);
}
if (!p) {
- ret = -1; /* entire region invalid */
+ ret = false; /* entire region invalid */
break;
}
}
if (start < p->itree.start) {
- ret = -1; /* initial bytes invalid */
+ ret = false; /* initial bytes invalid */
break;
}
missing = flags & ~p->flags;
if (missing & ~PAGE_WRITE) {
- ret = -1; /* page doesn't match */
+ ret = false; /* page doesn't match */
break;
}
if (missing & PAGE_WRITE) {
if (!(p->flags & PAGE_WRITE_ORG)) {
- ret = -1; /* page not writable */
+ ret = false; /* page not writable */
break;
}
/* Asking about writable, but has been protected: undo. */
if (!page_unprotect(start, 0)) {
- ret = -1;
+ ret = false;
break;
}
/* TODO: page_unprotect should take a range, not a single page. */
if (last - start < TARGET_PAGE_SIZE) {
- ret = 0; /* ok */
+ ret = true; /* ok */
break;
}
start += TARGET_PAGE_SIZE;
@@ -585,7 +585,7 @@ int page_check_range(target_ulong start, target_ulong len, int flags)
}
if (last <= p->itree.last) {
- ret = 0; /* ok */
+ ret = true; /* ok */
break;
}
start = p->itree.last + 1;
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index 94cb4ad8fb..384dc5a5fe 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -8076,7 +8076,7 @@ static int open_self_maps(CPUArchState *cpu_env, int fd)
max = h2g_valid(max - 1) ?
max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
- if (page_check_range(h2g(min), max - min, flags) == -1) {
+ if (!page_check_range(h2g(min), max - min, flags)) {
continue;
}
diff --git a/target/hppa/op_helper.c b/target/hppa/op_helper.c
index 32c27c66b2..f25a5a72aa 100644
--- a/target/hppa/op_helper.c
+++ b/target/hppa/op_helper.c
@@ -168,7 +168,7 @@ target_ureg HELPER(probe)(CPUHPPAState *env, target_ulong addr,
uint32_t level, uint32_t want)
{
#ifdef CONFIG_USER_ONLY
- return (page_check_range(addr, 1, want) == 0) ? 1 : 0;
+ return page_check_range(addr, 1, want);
#else
int prot, excp;
hwaddr phys;
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 1e06e7447c..1f9549f168 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -583,7 +583,7 @@ vext_ldff(void *vd, void *v0, target_ulong base,
cpu_mmu_index(env, false));
if (host) {
#ifdef CONFIG_USER_ONLY
- if (page_check_range(addr, offset, PAGE_READ) < 0) {
+ if (!page_check_range(addr, offset, PAGE_READ)) {
vl = i;
goto ProbeSuccess;
}
diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c
index 981a47d8bb..78b03308ae 100644
--- a/target/sparc/ldst_helper.c
+++ b/target/sparc/ldst_helper.c
@@ -1191,7 +1191,7 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
case ASI_PNFL: /* Primary no-fault LE */
case ASI_SNF: /* Secondary no-fault */
case ASI_SNFL: /* Secondary no-fault LE */
- if (page_check_range(addr, size, PAGE_READ) == -1) {
+ if (!page_check_range(addr, size, PAGE_READ)) {
ret = 0;
break;
}
diff --git a/accel/tcg/ldst_atomicity.c.inc b/accel/tcg/ldst_atomicity.c.inc
index de70531a7a..4de0a80492 100644
--- a/accel/tcg/ldst_atomicity.c.inc
+++ b/accel/tcg/ldst_atomicity.c.inc
@@ -159,7 +159,7 @@ static uint64_t load_atomic8_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
* another process, because the fallback start_exclusive solution
* provides no protection across processes.
*/
- if (!page_check_range(h2g(pv), 8, PAGE_WRITE_ORG)) {
+ if (page_check_range(h2g(pv), 8, PAGE_WRITE_ORG)) {
uint64_t *p = __builtin_assume_aligned(pv, 8);
return *p;
}
@@ -194,7 +194,7 @@ static Int128 load_atomic16_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
* another process, because the fallback start_exclusive solution
* provides no protection across processes.
*/
- if (!page_check_range(h2g(p), 16, PAGE_WRITE_ORG)) {
+ if (page_check_range(h2g(p), 16, PAGE_WRITE_ORG)) {
return *p;
}
#endif
--
2.34.1
^ permalink raw reply related [flat|nested] 43+ messages in thread
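For readers following the conversion, a minimal caller-side sketch of the
old and new page_check_range() conventions (illustrative only, not part of
the patch above; the error value is a placeholder):

    /* Before: int return, 0 on success, -1 on failure. */
    if (page_check_range(addr, size, PAGE_READ) != 0) {
        return -TARGET_EFAULT;              /* placeholder error path */
    }

    /* After: bool return, true when every page in [addr, addr + size)
     * has the requested flags; with flags == 0 this degenerates to a
     * plain "is the range mapped" (PAGE_VALID) test.
     */
    if (!page_check_range(addr, size, PAGE_READ)) {
        return -TARGET_EFAULT;              /* placeholder error path */
    }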
* [PATCH 23/24] linux-user: Remove can_passthrough_madvise
2023-06-30 13:21 [PATCH for-8.1? 00/24] linux-user: mmap range fixes Richard Henderson
` (21 preceding siblings ...)
2023-06-30 13:21 ` [PATCH 22/24] accel/tcg: Return bool from page_check_range Richard Henderson
@ 2023-06-30 13:21 ` Richard Henderson
2023-06-30 13:21 ` [PATCH 24/24] linux-user: Simplify target_madvise Richard Henderson
23 siblings, 0 replies; 43+ messages in thread
From: Richard Henderson @ 2023-06-30 13:21 UTC (permalink / raw)
To: qemu-devel; +Cc: mjt, laurent
Use page_check_range instead; it queries the interval tree rather than
checking each page individually.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
linux-user/mmap.c | 24 +++---------------------
1 file changed, 3 insertions(+), 21 deletions(-)
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index e6463ecd8d..a2bef1ebe6 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -898,23 +898,6 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
return new_addr;
}
-static bool can_passthrough_madvise(abi_ulong start, abi_ulong end)
-{
- ulong addr;
-
- if ((start | end) & ~qemu_host_page_mask) {
- return false;
- }
-
- for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
- if (!(page_get_flags(addr) & PAGE_PASSTHROUGH)) {
- return false;
- }
- }
-
- return true;
-}
-
abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice)
{
abi_ulong len, end;
@@ -964,9 +947,8 @@ abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice)
*
* A straight passthrough for those may not be safe because qemu sometimes
* turns private file-backed mappings into anonymous mappings.
- * can_passthrough_madvise() helps to check if a passthrough is possible by
- * comparing mappings that are known to have the same semantics in the host
- * and the guest. In this case passthrough is safe.
+ * If all guest pages have PAGE_PASSTHROUGH set, mappings have the
+ * same semantics for the host as for the guest.
*
* We pass through MADV_WIPEONFORK and MADV_KEEPONFORK if possible and
* return failure if not.
@@ -984,7 +966,7 @@ abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice)
ret = -EINVAL;
/* fall through */
case MADV_DONTNEED:
- if (can_passthrough_madvise(start, end)) {
+ if (page_check_range(start, len, PAGE_PASSTHROUGH)) {
ret = get_errno(madvise(g2h_untagged(start), len, advice));
if ((advice == MADV_DONTNEED) && (ret == 0)) {
page_reset_target_data(start, start + len - 1);
--
2.34.1
^ permalink raw reply related [flat|nested] 43+ messages in thread
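For context, a rough before/after sketch of the passthrough check removed
here (fragment only, not part of the patch above; identifiers as used in
the diffs):

    /* Before: one page_get_flags() lookup per target page in [start, end). */
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        if (!(page_get_flags(addr) & PAGE_PASSTHROUGH)) {
            return false;
        }
    }

    /* After: a single range query; page_check_range() walks whole
     * interval-tree entries, typically one pageflags_find() lookup for a
     * contiguous mapping.
     */
    return page_check_range(start, len, PAGE_PASSTHROUGH);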
* [PATCH 24/24] linux-user: Simplify target_madvise
2023-06-30 13:21 [PATCH for-8.1? 00/24] linux-user: mmap range fixes Richard Henderson
` (22 preceding siblings ...)
2023-06-30 13:21 ` [PATCH 23/24] linux-user: Remove can_passthrough_madvise Richard Henderson
@ 2023-06-30 13:21 ` Richard Henderson
23 siblings, 0 replies; 43+ messages in thread
From: Richard Henderson @ 2023-06-30 13:21 UTC (permalink / raw)
To: qemu-devel; +Cc: mjt, laurent
The trivial length 0 check can be moved up, simplifying some
of the other cases. The end < start test is handled by
guest_range_valid_untagged.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
linux-user/mmap.c | 19 ++++---------------
1 file changed, 4 insertions(+), 15 deletions(-)
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index a2bef1ebe6..48b83ca8bf 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -900,28 +900,17 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice)
{
- abi_ulong len, end;
+ abi_ulong len;
int ret = 0;
if (start & ~TARGET_PAGE_MASK) {
return -TARGET_EINVAL;
}
- len = TARGET_PAGE_ALIGN(len_in);
-
- if (len_in && !len) {
- return -TARGET_EINVAL;
- }
-
- end = start + len;
- if (end < start) {
- return -TARGET_EINVAL;
- }
-
- if (end == start) {
+ if (len_in == 0) {
return 0;
}
-
- if (!guest_range_valid_untagged(start, len)) {
+ len = TARGET_PAGE_ALIGN(len_in);
+ if (len == 0 || !guest_range_valid_untagged(start, len)) {
return -TARGET_EINVAL;
}
--
2.34.1
^ permalink raw reply related [flat|nested] 43+ messages in thread
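A small self-contained illustration of why the single "len == 0" test after
TARGET_PAGE_ALIGN also rejects lengths that would wrap the guest address
space (assumes a 32-bit abi_ulong and 4 KiB pages purely for the example;
the real constants vary by target):

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for the guest constants. */
    #define TARGET_PAGE_SIZE     4096u
    #define TARGET_PAGE_ALIGN(x) \
        (((x) + TARGET_PAGE_SIZE - 1) & ~(uint32_t)(TARGET_PAGE_SIZE - 1))

    int main(void)
    {
        uint32_t len_in = 0xfffff001u;            /* reaches into the last page */
        uint32_t len = TARGET_PAGE_ALIGN(len_in); /* wraps around to 0 */

        printf("aligned len = %#x\n", (unsigned)len); /* 0 -> -TARGET_EINVAL */
        return 0;
    }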