public inbox for linux-mm@kvack.org
* [PATCH v3] mm/execmem: Make the populate and alloc atomic
@ 2026-03-19  8:59 Hubert Mazur
From: Hubert Mazur @ 2026-03-19  8:59 UTC
  To: Andrew Morton, Mike Rapoport
  Cc: Greg Kroah-Hartman, Stanislaw Kardach, Michal Krawczyk,
	Slawomir Rosek, Lukasz Majczak, linux-mm, linux-kernel,
	Hubert Mazur

When a memory block is requested from the execmem manager, it tries
to find a suitable fragment in free_areas. If there is no such block,
a new memory area is added to free_areas and then allocated to the
caller. These two operations must be performed atomically to ensure
that no other memory request consumes the new block in between.

Signed-off-by: Hubert Mazur <hmazur@google.com>
---
Changes in v3:
- Addressed the maintainer's comments regarding style issues
- Removed unnecessary conditional statement

Changes in v2:
The execmem_cache_alloc_locked function (a variant of
__execmem_cache_alloc that expects the caller to hold
execmem_cache.mutex) is introduced and called after
execmem_cache_add_locked from the execmem_cache_populate_alloc
function (renamed from execmem_cache_populate). Both calls are now
guarded by a single mutex.
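
For reference, a possible interleaving that a single mutex closes,
reconstructed from the pre-patch code (illustrative only, with two
concurrent callers A and B):

  A: __execmem_cache_alloc()   -> no suitable free area, returns NULL
  A: execmem_cache_populate()  -> adds a new area to free_areas and
                                  releases the mutex
  B: __execmem_cache_alloc()   -> takes the mutex first and consumes
                                  the newly added area
  A: __execmem_cache_alloc()   -> the new area is gone, returns NULL
                                  even though memory was populated

Holding execmem_cache.mutex across both execmem_cache_add_locked and
execmem_cache_alloc_locked leaves no window between the add and the
alloc.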

Link to v2:
https://lore.kernel.org/all/20260317125020.1293472-2-hmazur@google.com/

Changes in v1:
Allocate the new memory fragment and assign it directly to busy_areas
inside the execmem_cache_populate function.

Link to v1:
https://lore.kernel.org/all/20260312131438.361746-1-hmazur@google.com/T/#t

 mm/execmem.c | 55 +++++++++++++++++++++++++++-------------------------
 1 file changed, 29 insertions(+), 26 deletions(-)

diff --git a/mm/execmem.c b/mm/execmem.c
index 810a4ba9c924..4477bb9209ab 100644
--- a/mm/execmem.c
+++ b/mm/execmem.c
@@ -203,13 +203,6 @@ static int execmem_cache_add_locked(void *ptr, size_t size, gfp_t gfp_mask)
 	return mas_store_gfp(&mas, (void *)lower, gfp_mask);
 }
 
-static int execmem_cache_add(void *ptr, size_t size, gfp_t gfp_mask)
-{
-	guard(mutex)(&execmem_cache.mutex);
-
-	return execmem_cache_add_locked(ptr, size, gfp_mask);
-}
-
 static bool within_range(struct execmem_range *range, struct ma_state *mas,
 			 size_t size)
 {
@@ -225,18 +218,16 @@ static bool within_range(struct execmem_range *range, struct ma_state *mas,
 	return false;
 }
 
-static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
+static void *execmem_cache_alloc_locked(struct execmem_range *range, size_t size)
 {
 	struct maple_tree *free_areas = &execmem_cache.free_areas;
 	struct maple_tree *busy_areas = &execmem_cache.busy_areas;
 	MA_STATE(mas_free, free_areas, 0, ULONG_MAX);
 	MA_STATE(mas_busy, busy_areas, 0, ULONG_MAX);
-	struct mutex *mutex = &execmem_cache.mutex;
 	unsigned long addr, last, area_size = 0;
 	void *area, *ptr = NULL;
 	int err;
 
-	mutex_lock(mutex);
 	mas_for_each(&mas_free, area, ULONG_MAX) {
 		area_size = mas_range_len(&mas_free);
 
@@ -245,7 +236,7 @@ static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
 	}
 
 	if (area_size < size)
-		goto out_unlock;
+		return NULL;
 
 	addr = mas_free.index;
 	last = mas_free.last;
@@ -254,7 +245,7 @@ static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
 	mas_set_range(&mas_busy, addr, addr + size - 1);
 	err = mas_store_gfp(&mas_busy, (void *)addr, GFP_KERNEL);
 	if (err)
-		goto out_unlock;
+		return NULL;
 
 	mas_store_gfp(&mas_free, NULL, GFP_KERNEL);
 	if (area_size > size) {
@@ -268,19 +259,25 @@ static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
 		err = mas_store_gfp(&mas_free, ptr, GFP_KERNEL);
 		if (err) {
 			mas_store_gfp(&mas_busy, NULL, GFP_KERNEL);
-			goto out_unlock;
+			return NULL;
 		}
 	}
 	ptr = (void *)addr;
 
-out_unlock:
-	mutex_unlock(mutex);
 	return ptr;
 }
 
-static int execmem_cache_populate(struct execmem_range *range, size_t size)
+static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
+{
+	guard(mutex)(&execmem_cache.mutex);
+
+	return execmem_cache_alloc_locked(range, size);
+}
+
+static void *execmem_cache_populate_alloc(struct execmem_range *range, size_t size)
 {
 	unsigned long vm_flags = VM_ALLOW_HUGE_VMAP;
+	struct mutex *mutex = &execmem_cache.mutex;
 	struct vm_struct *vm;
 	size_t alloc_size;
 	int err = -ENOMEM;
@@ -294,7 +291,7 @@ static int execmem_cache_populate(struct execmem_range *range, size_t size)
 	}
 
 	if (!p)
-		return err;
+		return NULL;
 
 	vm = find_vm_area(p);
 	if (!vm)
@@ -307,33 +304,39 @@ static int execmem_cache_populate(struct execmem_range *range, size_t size)
 	if (err)
 		goto err_free_mem;
 
-	err = execmem_cache_add(p, alloc_size, GFP_KERNEL);
+	/*
+	 * New memory blocks must be propagated and allocated as an atomic
+	 * operation, otherwise it may be consumed by a parallel call
+	 * to the execmem_cache_alloc function.
+	 */
+	mutex_lock(mutex);
+	err = execmem_cache_add_locked(p, alloc_size, GFP_KERNEL);
 	if (err)
 		goto err_reset_direct_map;
 
-	return 0;
+	p = execmem_cache_alloc_locked(range, size);
+
+	mutex_unlock(mutex);
+
+	return p;
 
 err_reset_direct_map:
+	mutex_unlock(mutex);
 	execmem_set_direct_map_valid(vm, true);
 err_free_mem:
 	vfree(p);
-	return err;
+	return NULL;
 }
 
 static void *execmem_cache_alloc(struct execmem_range *range, size_t size)
 {
 	void *p;
-	int err;
 
 	p = __execmem_cache_alloc(range, size);
 	if (p)
 		return p;
 
-	err = execmem_cache_populate(range, size);
-	if (err)
-		return NULL;
-
-	return __execmem_cache_alloc(range, size);
+	return execmem_cache_populate_alloc(range, size);
 }
 
 static inline bool is_pending_free(void *ptr)
-- 
2.53.0.851.ga537e3e6e9-goog




* Re: [PATCH v3] mm/execmem: Make the populate and alloc atomic
@ 2026-03-19 17:19 ` Mike Rapoport
From: Mike Rapoport @ 2026-03-19 17:19 UTC
  To: Hubert Mazur
  Cc: Andrew Morton, Greg Kroah-Hartman, Stanislaw Kardach,
	Michal Krawczyk, Slawomir Rosek, Lukasz Majczak, linux-mm,
	linux-kernel

On Thu, Mar 19, 2026 at 08:59:07AM +0000, Hubert Mazur wrote:
> When a memory block is requested from the execmem manager, it tries
> to find a suitable fragment in free_areas. If there is no such block,
> a new memory area is added to free_areas and then allocated to the
> caller. These two operations must be performed atomically to ensure
> that no other memory request consumes the new block in between.

Sorry if I was not clear: the motivation for the patch that you had in
the v2 cover letter should have been put in the commit message rather
than completely dropped.
 
[...]

> -	err = execmem_cache_add(p, alloc_size, GFP_KERNEL);
> +	/*
> +	 * New memory blocks must be propagated and allocated as an atomic

Nit:                               ^ allocated and added to the cache

> +	 * operation, otherwise it may be consumed by a parallel call

                               ^ they

> +	 * to the execmem_cache_alloc function.
> +	 */

[...]

-- 
Sincerely yours,
Mike.

