+ mm-execmem-make-the-populate-and-alloc-atomic.patch added to mm-new branch
From: Andrew Morton @ 2026-03-20 19:16 UTC
To: mm-commits, srosek, skardach, rppt, mikrawczyk, gregkh, hmazur,
akpm
The patch titled
Subject: mm/execmem: make the populate and alloc atomic
has been added to the -mm mm-new branch. Its filename is
mm-execmem-make-the-populate-and-alloc-atomic.patch
This patch will shortly appear at
https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/mm-execmem-make-the-populate-and-alloc-atomic.patch
This patch will later appear in the mm-new branch at
git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Note, mm-new is a provisional staging ground for work-in-progress
patches, and acceptance into mm-new is a notification for others to take
notice and to finish up reviews. Please do not hesitate to respond to
review feedback and to post updated versions to replace or incrementally
fix up patches in mm-new.
The mm-new branch of mm.git is not included in linux-next.
If a few days of testing in mm-new is successful, the patch will be moved
into mm.git's mm-unstable branch, which is included in linux-next.
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included in linux-next via various
branches at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there most days.
------------------------------------------------------
From: Hubert Mazur <hmazur@google.com>
Subject: mm/execmem: make the populate and alloc atomic
Date: Fri, 20 Mar 2026 07:57:23 +0000
When a block of memory is requested from the execmem manager, it tries
to find a suitable fragment by traversing the free_areas maple tree. If
no such block exists, a new memory area is populated and added to
free_areas, and the request is then satisfied by traversing the tree
again.
These two steps, population and the second traversal, are not atomic,
so a concurrent request may consume the newly added memory block,
causing the original request to fail. Such failures have been observed
on devices running the 6.18 kernel during parallel module loading.
To prevent this race, perform the cache population and the subsequent
allocation under a single mutex lock.
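
As an illustration of the fix, here is a minimal userspace sketch of
the locking pattern. The names (cache_lock, cache_add_locked,
cache_alloc_locked) and the one-entry cache are simplified stand-ins
for the kernel code, not the real mm/execmem.c API:

#include <pthread.h>
#include <stddef.h>
#include <stdlib.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static void *free_block;	/* one-entry stand-in for free_areas */

/* Caller must hold cache_lock. */
static void *cache_alloc_locked(size_t size)
{
	void *p = free_block;

	(void)size;		/* the sketch ignores sizing/splitting */
	free_block = NULL;	/* block moves to the "busy" side */
	return p;
}

/* Caller must hold cache_lock. */
static void cache_add_locked(void *ptr)
{
	free_block = ptr;
}

static void *cache_populate_alloc(size_t size)
{
	void *p = malloc(size);	/* stands in for the vmalloc'ed area */

	if (!p)
		return NULL;

	/*
	 * The racy flow took the lock twice: once to add the fresh
	 * block and again to allocate from it, so a parallel allocator
	 * could steal the block in between.  Doing both steps in one
	 * critical section closes that window.
	 */
	pthread_mutex_lock(&cache_lock);
	cache_add_locked(p);
	p = cache_alloc_locked(size);
	pthread_mutex_unlock(&cache_lock);

	return p;
}

The point is only that the add and the alloc happen inside a single
critical section, mirroring execmem_cache_populate_alloc() in the
patch below.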
Link: https://lkml.kernel.org/r/20260320075723.779985-1-hmazur@google.com
Signed-off-by: Hubert Mazur <hmazur@google.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Stanislaw Kardach <skardach@google.com>
Cc: Michal Krawczyk <mikrawczyk@google.com>
Cc: Slawomir Rosek <srosek@google.com>
Cc: Hubert Mazur <hmazur@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
mm/execmem.c | 55 +++++++++++++++++++++++++------------------------
1 file changed, 29 insertions(+), 26 deletions(-)
--- a/mm/execmem.c~mm-execmem-make-the-populate-and-alloc-atomic
+++ a/mm/execmem.c
@@ -203,13 +203,6 @@ static int execmem_cache_add_locked(void
return mas_store_gfp(&mas, (void *)lower, gfp_mask);
}
-static int execmem_cache_add(void *ptr, size_t size, gfp_t gfp_mask)
-{
- guard(mutex)(&execmem_cache.mutex);
-
- return execmem_cache_add_locked(ptr, size, gfp_mask);
-}
-
static bool within_range(struct execmem_range *range, struct ma_state *mas,
size_t size)
{
@@ -225,18 +218,16 @@ static bool within_range(struct execmem_
return false;
}
-static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
+static void *execmem_cache_alloc_locked(struct execmem_range *range, size_t size)
{
struct maple_tree *free_areas = &execmem_cache.free_areas;
struct maple_tree *busy_areas = &execmem_cache.busy_areas;
MA_STATE(mas_free, free_areas, 0, ULONG_MAX);
MA_STATE(mas_busy, busy_areas, 0, ULONG_MAX);
- struct mutex *mutex = &execmem_cache.mutex;
unsigned long addr, last, area_size = 0;
void *area, *ptr = NULL;
int err;
- mutex_lock(mutex);
mas_for_each(&mas_free, area, ULONG_MAX) {
area_size = mas_range_len(&mas_free);
@@ -245,7 +236,7 @@ static void *__execmem_cache_alloc(struc
}
if (area_size < size)
- goto out_unlock;
+ return NULL;
addr = mas_free.index;
last = mas_free.last;
@@ -254,7 +245,7 @@ static void *__execmem_cache_alloc(struc
mas_set_range(&mas_busy, addr, addr + size - 1);
err = mas_store_gfp(&mas_busy, (void *)addr, GFP_KERNEL);
if (err)
- goto out_unlock;
+ return NULL;
mas_store_gfp(&mas_free, NULL, GFP_KERNEL);
if (area_size > size) {
@@ -268,19 +259,25 @@ static void *__execmem_cache_alloc(struc
err = mas_store_gfp(&mas_free, ptr, GFP_KERNEL);
if (err) {
mas_store_gfp(&mas_busy, NULL, GFP_KERNEL);
- goto out_unlock;
+ return NULL;
}
}
ptr = (void *)addr;
-out_unlock:
- mutex_unlock(mutex);
return ptr;
}
-static int execmem_cache_populate(struct execmem_range *range, size_t size)
+static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
+{
+ guard(mutex)(&execmem_cache.mutex);
+
+ return execmem_cache_alloc_locked(range, size);
+}
+
+static void *execmem_cache_populate_alloc(struct execmem_range *range, size_t size)
{
unsigned long vm_flags = VM_ALLOW_HUGE_VMAP;
+ struct mutex *mutex = &execmem_cache.mutex;
struct vm_struct *vm;
size_t alloc_size;
int err = -ENOMEM;
@@ -294,7 +291,7 @@ static int execmem_cache_populate(struct
}
if (!p)
- return err;
+ return NULL;
vm = find_vm_area(p);
if (!vm)
@@ -307,33 +304,39 @@ static int execmem_cache_populate(struct
if (err)
goto err_free_mem;
- err = execmem_cache_add(p, alloc_size, GFP_KERNEL);
+ /*
+ * New memory blocks must be allocated and added to the cache
+ * as an atomic operation, otherwise they may be consumed
+ * by a parallel call to the execmem_cache_alloc function.
+ */
+ mutex_lock(mutex);
+ err = execmem_cache_add_locked(p, alloc_size, GFP_KERNEL);
if (err)
goto err_reset_direct_map;
- return 0;
+ p = execmem_cache_alloc_locked(range, size);
+
+ mutex_unlock(mutex);
+
+ return p;
err_reset_direct_map:
+ mutex_unlock(mutex);
execmem_set_direct_map_valid(vm, true);
err_free_mem:
vfree(p);
- return err;
+ return NULL;
}
static void *execmem_cache_alloc(struct execmem_range *range, size_t size)
{
void *p;
- int err;
p = __execmem_cache_alloc(range, size);
if (p)
return p;
- err = execmem_cache_populate(range, size);
- if (err)
- return NULL;
-
- return __execmem_cache_alloc(range, size);
+ return execmem_cache_populate_alloc(range, size);
}
static inline bool is_pending_free(void *ptr)
_
Patches currently in -mm which might be from hmazur@google.com are
mm-execmem-make-the-populate-and-alloc-atomic.patch