From mboxrd@z Thu Jan  1 00:00:00 1970
Date: Wed, 18 Mar 2026 16:41:54 +0200
From: Mike Rapoport
To: Hubert Mazur
Cc: Andrew Morton, Greg Kroah-Hartman, Stanislaw Kardach,
	Michal Krawczyk, Slawomir Rosek, Lukasz Majczak,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org
Subject: Re: [PATCH v2 1/1] mm/execmem: Make the populate and alloc atomic
References: <20260317125020.1293472-1-hmazur@google.com>
	<20260317125020.1293472-2-hmazur@google.com>
In-Reply-To: <20260317125020.1293472-2-hmazur@google.com>

Hi Hubert,

On Tue, Mar 17, 2026 at 12:50:20PM +0000, Hubert Mazur wrote:
> Subject: mm/execmem: Make the populate and alloc atomic
> 
> When a memory block is requested from the execmem manager, it tries
> to find a suitable fragment in free_areas. If there is no such
> fragment, a new memory area is added to free_areas and then
> allocated to the caller. Those two operations must be atomic to
> ensure that no other memory request consumes the new area first.
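
To spell out the race for the archives, this is roughly the pre-patch
flow (a simplified sketch of execmem_cache_alloc(), based on the code
this patch removes, error handling aside):

	p = __execmem_cache_alloc(range, size);	/* takes and drops the mutex */
	if (p)
		return p;

	/* adds a new free area; takes and drops the mutex internally */
	err = execmem_cache_populate(range, size);
	if (err)
		return NULL;

	/*
	 * Window: another thread may grab the freshly added area here,
	 * so this retry can fail even though populate succeeded.
	 */
	return __execmem_cache_alloc(range, size);
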
> 
> Signed-off-by: Hubert Mazur
> ---
>  mm/execmem.c | 61 +++++++++++++++++++++++++++++-----------------------
>  1 file changed, 34 insertions(+), 27 deletions(-)
> 
> diff --git a/mm/execmem.c b/mm/execmem.c
> index 810a4ba9c924..9043f0f8f61a 100644
> --- a/mm/execmem.c
> +++ b/mm/execmem.c
> @@ -203,13 +203,6 @@ static int execmem_cache_add_locked(void *ptr, size_t size, gfp_t gfp_mask)
>  	return mas_store_gfp(&mas, (void *)lower, gfp_mask);
>  }
>  
> -static int execmem_cache_add(void *ptr, size_t size, gfp_t gfp_mask)
> -{
> -	guard(mutex)(&execmem_cache.mutex);
> -
> -	return execmem_cache_add_locked(ptr, size, gfp_mask);
> -}
> -
>  static bool within_range(struct execmem_range *range, struct ma_state *mas,
>  			 size_t size)
>  {
> @@ -225,18 +218,16 @@ static bool within_range(struct execmem_range *range, struct ma_state *mas,
>  	return false;
>  }
>  
> -static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
> +static void *__execmem_cache_alloc_locked(struct execmem_range *range, size_t size)

No need to keep prefix underscores here.

>  {
>  	struct maple_tree *free_areas = &execmem_cache.free_areas;
>  	struct maple_tree *busy_areas = &execmem_cache.busy_areas;
>  	MA_STATE(mas_free, free_areas, 0, ULONG_MAX);
>  	MA_STATE(mas_busy, busy_areas, 0, ULONG_MAX);
> -	struct mutex *mutex = &execmem_cache.mutex;
>  	unsigned long addr, last, area_size = 0;
>  	void *area, *ptr = NULL;
>  	int err;
>  
> -	mutex_lock(mutex);
>  	mas_for_each(&mas_free, area, ULONG_MAX) {
>  		area_size = mas_range_len(&mas_free);
>  
> @@ -245,7 +236,7 @@ static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
>  	}
>  
>  	if (area_size < size)
> -		goto out_unlock;
> +		return NULL;
>  
>  	addr = mas_free.index;
>  	last = mas_free.last;
> @@ -254,7 +245,7 @@ static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
>  	mas_set_range(&mas_busy, addr, addr + size - 1);
>  	err = mas_store_gfp(&mas_busy, (void *)addr, GFP_KERNEL);
>  	if (err)
> -		goto out_unlock;
> +		return NULL;
>  
>  	mas_store_gfp(&mas_free, NULL, GFP_KERNEL);
>  	if (area_size > size) {
> @@ -268,19 +259,25 @@ static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
>  		err = mas_store_gfp(&mas_free, ptr, GFP_KERNEL);
>  		if (err) {
>  			mas_store_gfp(&mas_busy, NULL, GFP_KERNEL);
> -			goto out_unlock;
> +			return NULL;
>  		}
>  	}
>  	ptr = (void *)addr;
>  
> -out_unlock:
> -	mutex_unlock(mutex);
>  	return ptr;
>  }
>  
> -static int execmem_cache_populate(struct execmem_range *range, size_t size)
> +static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
> +{
> +	guard(mutex)(&execmem_cache.mutex);
> +
> +	return __execmem_cache_alloc_locked(range, size);
> +}
> +
> +static void *__execmem_cache_populate_alloc(struct execmem_range *range, size_t size)

No need for leading underscores here as well.
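
I.e. something along these lines (only a naming sketch, with the bodies
as in your patch; the lockdep_assert_held() is merely a suggestion to
document the locking contract):

	static void *execmem_cache_alloc_locked(struct execmem_range *range,
						size_t size)
	{
		lockdep_assert_held(&execmem_cache.mutex);
		/* ... body as in your patch ... */
	}

	static void *execmem_cache_populate_alloc(struct execmem_range *range,
						  size_t size);
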
>  {
>  	unsigned long vm_flags = VM_ALLOW_HUGE_VMAP;
> +	struct mutex *mutex = &execmem_cache.mutex;
>  	struct vm_struct *vm;
>  	size_t alloc_size;
>  	int err = -ENOMEM;
> @@ -294,7 +291,7 @@ static int execmem_cache_populate(struct execmem_range *range, size_t size)
>  	}
>  
>  	if (!p)
> -		return err;
> +		return NULL;
>  
>  	vm = find_vm_area(p);
>  	if (!vm)
> @@ -307,33 +304,43 @@ static int execmem_cache_populate(struct execmem_range *range, size_t size)
>  	if (err)
>  		goto err_free_mem;
>  
> -	err = execmem_cache_add(p, alloc_size, GFP_KERNEL);
> -	if (err)
> +	/*
> +	 * New memory blocks must be propagated and allocated as an atomic operation,
> +	 * otherwise it may be consumed by a parallel call to the execmem_cache_alloc
> +	 * function.
> +	 */

Please keep the comment lines under 80 characters.

> +	mutex_lock(mutex);
> +	err = execmem_cache_add_locked(p, alloc_size, GFP_KERNEL);
> +	if (err) {
> +		mutex_unlock(mutex);
>  		goto err_reset_direct_map;

Please add a new label that unlocks the mutex, and goto that new label
here instead.

> +	}
>  
> -	return 0;
> +	p = __execmem_cache_alloc_locked(range, size);
> +	if (!p) {
> +		mutex_unlock(mutex);
> +		return NULL;

This 'if' is not needed: we unlock the mutex and return p either way,
and if __execmem_cache_alloc_locked() failed, p is NULL anyway.

> +	}
> +	mutex_unlock(mutex);
> +
> +	return p;
>  
>  err_reset_direct_map:
>  	execmem_set_direct_map_valid(vm, true);
>  err_free_mem:
>  	vfree(p);
> -	return err;
> +	return NULL;
>  }
>  
>  static void *execmem_cache_alloc(struct execmem_range *range, size_t size)
>  {
>  	void *p;
> -	int err;
>  
>  	p = __execmem_cache_alloc(range, size);
>  	if (p)
>  		return p;
>  
> -	err = execmem_cache_populate(range, size);
> -	if (err)
> -		return NULL;
> -
> -	return __execmem_cache_alloc(range, size);
> +	return __execmem_cache_populate_alloc(range, size);
>  }
>  
>  static inline bool is_pending_free(void *ptr)
> -- 
> 2.53.0.851.ga537e3e6e9-goog
> 

-- 
Sincerely yours,
Mike.