From: Hubert Mazur <hmazur@google.com>
To: Andrew Morton <akpm@linux-foundation.org>,
Mike Rapoport <rppt@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
Stanislaw Kardach <skardach@google.com>,
Michal Krawczyk <mikrawczyk@google.com>,
Slawomir Rosek <srosek@google.com>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
Hubert Mazur <hmazur@google.com>
Subject: [PATCH v4] mm/execmem: Make the populate and alloc atomic
Date: Fri, 20 Mar 2026 07:57:23 +0000
Message-ID: <20260320075723.779985-1-hmazur@google.com>
When a block of memory is requested from the execmem manager, it tries
to find a suitable fragment by traversing the free_areas tree. If no
such block exists, a new memory area is added to free_areas and then
handed out to the caller by traversing the tree again.
The cache population and the subsequent allocation are not atomic, so
a parallel request may consume the newly added memory block, which
makes the allocation fail for the original request.
This failure has been observed on devices running the 6.18 kernel
during parallel module loading.
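A simplified interleaving of two concurrent requests illustrates the
window (all function names refer to the pre-patch code):

  CPU0                                  CPU1
  ----                                  ----
  execmem_cache_alloc()
    __execmem_cache_alloc() -> NULL
    execmem_cache_populate()
      execmem_cache_add()
        /* new block in free_areas, mutex released */
                                        execmem_cache_alloc()
                                          __execmem_cache_alloc()
                                          /* consumes the new block */
    __execmem_cache_alloc() -> NULL
  /* fails although populate succeeded */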
To close this race, perform the cache population and the allocation
under a single mutex lock, as sketched below.
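A condensed view of the resulting path (error handling omitted; see
the diff below for the full version):

  execmem_cache_populate_alloc()
    mutex_lock(&execmem_cache.mutex);
    execmem_cache_add_locked(p, alloc_size, GFP_KERNEL);
    p = execmem_cache_alloc_locked(range, size);
    mutex_unlock(&execmem_cache.mutex);
    return p;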
Signed-off-by: Hubert Mazur <hmazur@google.com>
---
Changes in v4:
- Fixed typos in the source code comments
- Extended the commit message with the rationale behind the change
Changes in v3:
- Addressed maintainer comments regarding style issues
- Removed an unnecessary conditional statement
Link to v3:
https://lore.kernel.org/all/20260319085907.3510446-1-hmazur@google.com/
Changes in v2:
The __execmem_cache_alloc_locked function (a variant of
__execmem_cache_alloc that expects the caller to hold the mutex) is
introduced and called after execmem_cache_add_locked from the
__execmem_cache_populate_alloc function (renamed from
execmem_cache_populate). Both calls are now guarded by a single mutex.
Link to v2:
https://lore.kernel.org/all/20260317125020.1293472-2-hmazur@google.com/
Changes in v1:
Allocate the new memory fragment and assign it directly to busy_areas
inside the execmem_cache_populate function.
Link to v1:
https://lore.kernel.org/all/20260312131438.361746-1-hmazur@google.com/T/#t
mm/execmem.c | 55 +++++++++++++++++++++++++++-------------------------
1 file changed, 29 insertions(+), 26 deletions(-)
diff --git a/mm/execmem.c b/mm/execmem.c
index 810a4ba9c924..084a207e4278 100644
--- a/mm/execmem.c
+++ b/mm/execmem.c
@@ -203,13 +203,6 @@ static int execmem_cache_add_locked(void *ptr, size_t size, gfp_t gfp_mask)
return mas_store_gfp(&mas, (void *)lower, gfp_mask);
}
-static int execmem_cache_add(void *ptr, size_t size, gfp_t gfp_mask)
-{
- guard(mutex)(&execmem_cache.mutex);
-
- return execmem_cache_add_locked(ptr, size, gfp_mask);
-}
-
static bool within_range(struct execmem_range *range, struct ma_state *mas,
size_t size)
{
@@ -225,18 +218,16 @@ static bool within_range(struct execmem_range *range, struct ma_state *mas,
return false;
}
-static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
+static void *execmem_cache_alloc_locked(struct execmem_range *range, size_t size)
{
struct maple_tree *free_areas = &execmem_cache.free_areas;
struct maple_tree *busy_areas = &execmem_cache.busy_areas;
MA_STATE(mas_free, free_areas, 0, ULONG_MAX);
MA_STATE(mas_busy, busy_areas, 0, ULONG_MAX);
- struct mutex *mutex = &execmem_cache.mutex;
unsigned long addr, last, area_size = 0;
void *area, *ptr = NULL;
int err;
- mutex_lock(mutex);
mas_for_each(&mas_free, area, ULONG_MAX) {
area_size = mas_range_len(&mas_free);
@@ -245,7 +236,7 @@ static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
}
if (area_size < size)
- goto out_unlock;
+ return NULL;
addr = mas_free.index;
last = mas_free.last;
@@ -254,7 +245,7 @@ static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
mas_set_range(&mas_busy, addr, addr + size - 1);
err = mas_store_gfp(&mas_busy, (void *)addr, GFP_KERNEL);
if (err)
- goto out_unlock;
+ return NULL;
mas_store_gfp(&mas_free, NULL, GFP_KERNEL);
if (area_size > size) {
@@ -268,19 +259,25 @@ static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
err = mas_store_gfp(&mas_free, ptr, GFP_KERNEL);
if (err) {
mas_store_gfp(&mas_busy, NULL, GFP_KERNEL);
- goto out_unlock;
+ return NULL;
}
}
ptr = (void *)addr;
-out_unlock:
- mutex_unlock(mutex);
return ptr;
}
-static int execmem_cache_populate(struct execmem_range *range, size_t size)
+static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
+{
+ guard(mutex)(&execmem_cache.mutex);
+
+ return execmem_cache_alloc_locked(range, size);
+}
+
+static void *execmem_cache_populate_alloc(struct execmem_range *range, size_t size)
{
unsigned long vm_flags = VM_ALLOW_HUGE_VMAP;
+ struct mutex *mutex = &execmem_cache.mutex;
struct vm_struct *vm;
size_t alloc_size;
int err = -ENOMEM;
@@ -294,7 +291,7 @@ static int execmem_cache_populate(struct execmem_range *range, size_t size)
}
if (!p)
- return err;
+ return NULL;
vm = find_vm_area(p);
if (!vm)
@@ -307,33 +304,39 @@ static int execmem_cache_populate(struct execmem_range *range, size_t size)
if (err)
goto err_free_mem;
- err = execmem_cache_add(p, alloc_size, GFP_KERNEL);
+ /*
+ * Adding the new block to the cache and allocating from it must
+ * happen atomically, otherwise the block may be consumed by a
+ * parallel call to execmem_cache_alloc().
+ */
+ mutex_lock(mutex);
+ err = execmem_cache_add_locked(p, alloc_size, GFP_KERNEL);
if (err)
goto err_reset_direct_map;
- return 0;
+ p = execmem_cache_alloc_locked(range, size);
+
+ mutex_unlock(mutex);
+
+ return p;
err_reset_direct_map:
+ mutex_unlock(mutex);
execmem_set_direct_map_valid(vm, true);
err_free_mem:
vfree(p);
- return err;
+ return NULL;
}
static void *execmem_cache_alloc(struct execmem_range *range, size_t size)
{
void *p;
- int err;
p = __execmem_cache_alloc(range, size);
if (p)
return p;
- err = execmem_cache_populate(range, size);
- if (err)
- return NULL;
-
- return __execmem_cache_alloc(range, size);
+ return execmem_cache_populate_alloc(range, size);
}
static inline bool is_pending_free(void *ptr)
--
2.53.0.959.g497ff81fa9-goog