From: Hubert Mazur <hmazur@google.com>
To: Andrew Morton <akpm@linux-foundation.org>,
Mike Rapoport <rppt@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
Stanislaw Kardach <skardach@google.com>,
Michal Krawczyk <mikrawczyk@google.com>,
Slawomir Rosek <srosek@google.com>,
Ryan Neph <ryanneph@google.com>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
Hubert Mazur <hmazur@google.com>
Subject: [PATCH v1 1/1] mm: fix race condition in the memory management
Date: Thu, 12 Mar 2026 13:14:38 +0000 [thread overview]
Message-ID: <20260312131438.361746-2-hmazur@google.com> (raw)
In-Reply-To: <20260312131438.361746-1-hmazur@google.com>
When 'ARCH_HAS_EXECMEM_ROX' is being enabled the memory management
system will use caching techniques to optimize the allocations. The
logic tries to find the appropriate memory block based on requested
size. This can fail if the current allocations are not sufficient, hence
the kernel allocates a new block large enough with regard to the request.
After the allocation is done, the new block is added to the
free_areas tree and then the tree is traversed in the hope of finding the
matching piece of memory. The operations of allocating new memory and
traversing the tree are not protected by a mutex and thus there is a
chance that some other process will "steal" this shiny new block. It's a
classic race condition for resources. Fix this accordingly by moving a
new block of memory to busy fragments instead of free and return the
pointer to memory. This simplifies the allocation logic since we don't
first extend the free areas just to take the memory back a bit later. In case the
new memory allocation is required - do it and return to the caller.
Signed-off-by: Hubert Mazur <hmazur@google.com>
---
mm/execmem.c | 36 +++++++++++++++++-------------------
1 file changed, 17 insertions(+), 19 deletions(-)
diff --git a/mm/execmem.c b/mm/execmem.c
index 810a4ba9c924..8aa44d19ec73 100644
--- a/mm/execmem.c
+++ b/mm/execmem.c
@@ -203,13 +203,6 @@ static int execmem_cache_add_locked(void *ptr, size_t size, gfp_t gfp_mask)
return mas_store_gfp(&mas, (void *)lower, gfp_mask);
}
-static int execmem_cache_add(void *ptr, size_t size, gfp_t gfp_mask)
-{
- guard(mutex)(&execmem_cache.mutex);
-
- return execmem_cache_add_locked(ptr, size, gfp_mask);
-}
-
static bool within_range(struct execmem_range *range, struct ma_state *mas,
size_t size)
{
@@ -225,7 +218,7 @@ static bool within_range(struct execmem_range *range, struct ma_state *mas,
return false;
}
-static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
+static void *__execmem_cache_lookup(struct execmem_range *range, size_t size)
{
struct maple_tree *free_areas = &execmem_cache.free_areas;
struct maple_tree *busy_areas = &execmem_cache.busy_areas;
@@ -278,10 +271,12 @@ static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
return ptr;
}
-static int execmem_cache_populate(struct execmem_range *range, size_t size)
+static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
{
+ struct maple_tree *busy_areas = &execmem_cache.busy_areas;
unsigned long vm_flags = VM_ALLOW_HUGE_VMAP;
struct vm_struct *vm;
+ unsigned long addr;
size_t alloc_size;
int err = -ENOMEM;
void *p;
@@ -294,7 +289,7 @@ static int execmem_cache_populate(struct execmem_range *range, size_t size)
}
if (!p)
- return err;
+ return NULL;
vm = find_vm_area(p);
if (!vm)
@@ -307,32 +302,35 @@ static int execmem_cache_populate(struct execmem_range *range, size_t size)
if (err)
goto err_free_mem;
- err = execmem_cache_add(p, alloc_size, GFP_KERNEL);
+ /* Set new allocation as an already busy fragment */
+ addr = (unsigned long)p;
+ MA_STATE(mas, busy_areas, addr - 1, addr + 1);
+ mas_set_range(&mas, addr, addr+size - 1);
+
+ mutex_lock(&execmem_cache.mutex);
+ err = mas_store_gfp(&mas, (void *)addr, GFP_KERNEL);
+ mutex_unlock(&execmem_cache.mutex);
+
if (err)
goto err_reset_direct_map;
- return 0;
+ return p;
err_reset_direct_map:
execmem_set_direct_map_valid(vm, true);
err_free_mem:
vfree(p);
- return err;
+ return NULL;
}
static void *execmem_cache_alloc(struct execmem_range *range, size_t size)
{
void *p;
- int err;
- p = __execmem_cache_alloc(range, size);
+ p = __execmem_cache_lookup(range, size);
if (p)
return p;
- err = execmem_cache_populate(range, size);
- if (err)
- return NULL;
-
return __execmem_cache_alloc(range, size);
}
--
2.53.0.851.ga537e3e6e9-goog
next prev parent reply other threads:[~2026-03-12 13:15 UTC|newest]
Thread overview: 4+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-12 13:14 [PATCH v1 0/1] Fix race condition in the memory management system Hubert Mazur
2026-03-12 13:14 ` Hubert Mazur [this message]
2026-03-12 13:42 ` [PATCH v1 1/1] mm: fix race condition in the memory management Mike Rapoport
2026-03-12 15:42 ` Hubert Mazur
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260312131438.361746-2-hmazur@google.com \
--to=hmazur@google.com \
--cc=akpm@linux-foundation.org \
--cc=gregkh@linuxfoundation.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=mikrawczyk@google.com \
--cc=rppt@kernel.org \
--cc=ryanneph@google.com \
--cc=skardach@google.com \
--cc=srosek@google.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox