* [PATCH v2] memblock: move reserve_bootmem_range() to memblock.c and make it static
@ 2026-03-23 7:20 Mike Rapoport
2026-03-23 12:18 ` David Hildenbrand (Arm)
` (2 more replies)
0 siblings, 3 replies; 5+ messages in thread
From: Mike Rapoport @ 2026-03-23 7:20 UTC (permalink / raw)
To: Andrew Morton, David Hildenbrand
Cc: Kees Cook, Liam R. Howlett, Lorenzo Stoakes, Michal Hocko,
Mike Rapoport, Suren Baghdasaryan, Vlastimil Babka, linux-kernel,
linux-mm
From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
reserve_bootmem_region() is only called from
memmap_init_reserved_pages() and it was in mm/mm_init.c because of its
dependencies on static init_deferred_page().
Since init_deferred_page() is not static anymore, move
reserve_bootmem_region(), rename it to memmap_init_reserved_range() and
make it static.
Update the comment describing it to better reflect what the function
does and drop bogus comment about reserved pages in free_bootmem_page().
Update memblock test stubs to reflect the core changes.
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
---
include/linux/bootmem_info.h | 4 ----
include/linux/mm.h | 3 ---
mm/memblock.c | 31 ++++++++++++++++++++++++++++---
mm/mm_init.c | 25 -------------------------
tools/include/linux/mm.h | 2 --
tools/testing/memblock/internal.h | 9 +++++++++
tools/testing/memblock/mmzone.c | 4 ----
7 files changed, 37 insertions(+), 41 deletions(-)
diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h
index 4c506e76a808..492ceeb1cdf8 100644
--- a/include/linux/bootmem_info.h
+++ b/include/linux/bootmem_info.h
@@ -44,10 +44,6 @@ static inline void free_bootmem_page(struct page *page)
{
enum bootmem_type type = bootmem_type(page);
- /*
- * The reserve_bootmem_region sets the reserved flag on bootmem
- * pages.
- */
VM_BUG_ON_PAGE(page_ref_count(page) != 2, page);
if (type == SECTION_INFO || type == MIX_SECTION_INFO)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index abb4963c1f06..764d10fdfb5d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3686,9 +3686,6 @@ extern unsigned long free_reserved_area(void *start, void *end,
extern void adjust_managed_page_count(struct page *page, long count);
-extern void reserve_bootmem_region(phys_addr_t start,
- phys_addr_t end, int nid);
-
/* Free the reserved page into the buddy system, so it gets managed. */
void free_reserved_page(struct page *page);
diff --git a/mm/memblock.c b/mm/memblock.c
index b3ddfdec7a80..d504205cdbf5 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -973,7 +973,7 @@ __init void memmap_init_kho_scratch_pages(void)
/*
* Initialize struct pages for free scratch memory.
* The struct pages for reserved scratch memory will be set up in
- * reserve_bootmem_region()
+ * memmap_init_reserved_pages()
*/
__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
MEMBLOCK_KHO_SCRATCH, &start, &end, &nid) {
@@ -2240,6 +2240,31 @@ static unsigned long __init __free_memory_core(phys_addr_t start,
return end_pfn - start_pfn;
}
+/*
+ * Initialised pages do not have PageReserved set. This function is called
+ * for each reserved range and marks the pages PageReserved.
+ * When deferred initialization of struct pages is enabled it also ensures
+ * that struct pages are properly initialised.
+ */
+static void __init memmap_init_reserved_range(phys_addr_t start,
+ phys_addr_t end, int nid)
+{
+ unsigned long pfn;
+
+ for_each_valid_pfn(pfn, PFN_DOWN(start), PFN_UP(end)) {
+ struct page *page = pfn_to_page(pfn);
+
+ init_deferred_page(pfn, nid);
+
+ /*
+ * no need for atomic set_bit because the struct
+ * page is not visible yet so nobody should
+ * access it yet.
+ */
+ __SetPageReserved(page);
+ }
+}
+
static void __init memmap_init_reserved_pages(void)
{
struct memblock_region *region;
@@ -2259,7 +2284,7 @@ static void __init memmap_init_reserved_pages(void)
end = start + region->size;
if (memblock_is_nomap(region))
- reserve_bootmem_region(start, end, nid);
+ memmap_init_reserved_range(start, end, nid);
memblock_set_node(start, region->size, &memblock.reserved, nid);
}
@@ -2284,7 +2309,7 @@ static void __init memmap_init_reserved_pages(void)
if (!numa_valid_node(nid))
nid = early_pfn_to_nid(PFN_DOWN(start));
- reserve_bootmem_region(start, end, nid);
+ memmap_init_reserved_range(start, end, nid);
}
}
}
diff --git a/mm/mm_init.c b/mm/mm_init.c
index df34797691bd..ea8d3de43470 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -772,31 +772,6 @@ void __meminit init_deferred_page(unsigned long pfn, int nid)
__init_deferred_page(pfn, nid);
}
-/*
- * Initialised pages do not have PageReserved set. This function is
- * called for each range allocated by the bootmem allocator and
- * marks the pages PageReserved. The remaining valid pages are later
- * sent to the buddy page allocator.
- */
-void __meminit reserve_bootmem_region(phys_addr_t start,
- phys_addr_t end, int nid)
-{
- unsigned long pfn;
-
- for_each_valid_pfn(pfn, PFN_DOWN(start), PFN_UP(end)) {
- struct page *page = pfn_to_page(pfn);
-
- __init_deferred_page(pfn, nid);
-
- /*
- * no need for atomic set_bit because the struct
- * page is not visible yet so nobody should
- * access it yet.
- */
- __SetPageReserved(page);
- }
-}
-
/* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
static bool __meminit
overlap_memmap_init(unsigned long zone, unsigned long *pfn)
diff --git a/tools/include/linux/mm.h b/tools/include/linux/mm.h
index 028f3faf46e7..74cbd51dbea2 100644
--- a/tools/include/linux/mm.h
+++ b/tools/include/linux/mm.h
@@ -32,8 +32,6 @@ static inline phys_addr_t virt_to_phys(volatile void *address)
return (phys_addr_t)address;
}
-void reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid);
-
static inline void totalram_pages_inc(void)
{
}
diff --git a/tools/testing/memblock/internal.h b/tools/testing/memblock/internal.h
index 009b97bbdd22..eb02d5771f4c 100644
--- a/tools/testing/memblock/internal.h
+++ b/tools/testing/memblock/internal.h
@@ -29,4 +29,13 @@ static inline unsigned long free_reserved_area(void *start, void *end,
return 0;
}
+#define for_each_valid_pfn(pfn, start_pfn, end_pfn) \
+ for ((pfn) = (start_pfn); (pfn) < (end_pfn); (pfn)++)
+
+static inline void init_deferred_page(unsigned long pfn, int nid)
+{
+}
+
+#define __SetPageReserved(p) ((void)(p))
+
#endif
diff --git a/tools/testing/memblock/mmzone.c b/tools/testing/memblock/mmzone.c
index d3d58851864e..e719450f81cb 100644
--- a/tools/testing/memblock/mmzone.c
+++ b/tools/testing/memblock/mmzone.c
@@ -11,10 +11,6 @@ struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
return NULL;
}
-void reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid)
-{
-}
-
void atomic_long_set(atomic_long_t *v, long i)
{
}
--
2.53.0
^ permalink raw reply related [flat|nested] 5+ messages in thread* Re: [PATCH v2] memblock: move reserve_bootmem_range() to memblock.c and make it static
2026-03-23 7:20 [PATCH v2] memblock: move reserve_bootmem_range() to memblock.c and make it static Mike Rapoport
@ 2026-03-23 12:18 ` David Hildenbrand (Arm)
2026-03-23 12:37 ` Lorenzo Stoakes (Oracle)
2026-03-24 15:50 ` Mike Rapoport
2 siblings, 0 replies; 5+ messages in thread
From: David Hildenbrand (Arm) @ 2026-03-23 12:18 UTC (permalink / raw)
To: Mike Rapoport, Andrew Morton
Cc: Kees Cook, Liam R. Howlett, Lorenzo Stoakes, Michal Hocko,
Suren Baghdasaryan, Vlastimil Babka, linux-kernel, linux-mm
On 3/23/26 08:20, Mike Rapoport wrote:
> From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
>
> reserve_bootmem_region() is only called from
> memmap_init_reserved_pages() and it was in mm/mm_init.c because of its
> dependencies on static init_deferred_page().
>
> Since init_deferred_page() is not static anymore, move
> reserve_bootmem_region(), rename it to memmap_init_reserved_range() and
> make it static.
>
> Update the comment describing it to better reflect what the function
> does and drop bogus comment about reserved pages in free_bootmem_page().
>
> Update memblock test stubs to reflect the core changes.
>
> Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
> ---
Reviewed-by: David Hildenbrand (Arm) <david@kernel.org>
--
Cheers,
David
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [PATCH v2] memblock: move reserve_bootmem_range() to memblock.c and make it static
2026-03-23 7:20 [PATCH v2] memblock: move reserve_bootmem_range() to memblock.c and make it static Mike Rapoport
2026-03-23 12:18 ` David Hildenbrand (Arm)
@ 2026-03-23 12:37 ` Lorenzo Stoakes (Oracle)
2026-03-23 14:01 ` Mike Rapoport
2026-03-24 15:50 ` Mike Rapoport
2 siblings, 1 reply; 5+ messages in thread
From: Lorenzo Stoakes (Oracle) @ 2026-03-23 12:37 UTC (permalink / raw)
To: Mike Rapoport
Cc: Andrew Morton, David Hildenbrand, Kees Cook, Liam R. Howlett,
Michal Hocko, Suren Baghdasaryan, Vlastimil Babka, linux-kernel,
linux-mm
On Mon, Mar 23, 2026 at 09:20:42AM +0200, Mike Rapoport wrote:
> From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
>
> reserve_bootmem_region() is only called from
> memmap_init_reserved_pages() and it was in mm/mm_init.c because of its
> > dependencies on static init_deferred_page().
>
> Since init_deferred_page() is not static anymore, move
> reserve_bootmem_region(), rename it to memmap_init_reserved_range() and
> make it static.
>
> Update the comment describing it to better reflect what the function
> does and drop bogus comment about reserved pages in free_bootmem_page().
>
> Update memblock test stubs to reflect the core changes.
>
> Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
LGTM and passed local tests so:
Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
> ---
> include/linux/bootmem_info.h | 4 ----
> include/linux/mm.h | 3 ---
> mm/memblock.c | 31 ++++++++++++++++++++++++++++---
> mm/mm_init.c | 25 -------------------------
> tools/include/linux/mm.h | 2 --
> tools/testing/memblock/internal.h | 9 +++++++++
> tools/testing/memblock/mmzone.c | 4 ----
> 7 files changed, 37 insertions(+), 41 deletions(-)
>
> diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h
> index 4c506e76a808..492ceeb1cdf8 100644
> --- a/include/linux/bootmem_info.h
> +++ b/include/linux/bootmem_info.h
> @@ -44,10 +44,6 @@ static inline void free_bootmem_page(struct page *page)
> {
> enum bootmem_type type = bootmem_type(page);
>
> - /*
> - * The reserve_bootmem_region sets the reserved flag on bootmem
> - * pages.
> - */
> VM_BUG_ON_PAGE(page_ref_count(page) != 2, page);
>
> if (type == SECTION_INFO || type == MIX_SECTION_INFO)
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index abb4963c1f06..764d10fdfb5d 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -3686,9 +3686,6 @@ extern unsigned long free_reserved_area(void *start, void *end,
>
> extern void adjust_managed_page_count(struct page *page, long count);
>
> -extern void reserve_bootmem_region(phys_addr_t start,
> - phys_addr_t end, int nid);
> -
> /* Free the reserved page into the buddy system, so it gets managed. */
> void free_reserved_page(struct page *page);
>
> diff --git a/mm/memblock.c b/mm/memblock.c
> index b3ddfdec7a80..d504205cdbf5 100644
> --- a/mm/memblock.c
> +++ b/mm/memblock.c
> @@ -973,7 +973,7 @@ __init void memmap_init_kho_scratch_pages(void)
> /*
> * Initialize struct pages for free scratch memory.
> * The struct pages for reserved scratch memory will be set up in
> - * reserve_bootmem_region()
> + * memmap_init_reserved_pages()
> */
> __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
> MEMBLOCK_KHO_SCRATCH, &start, &end, &nid) {
> @@ -2240,6 +2240,31 @@ static unsigned long __init __free_memory_core(phys_addr_t start,
> return end_pfn - start_pfn;
> }
>
> +/*
> + * Initialised pages do not have PageReserved set. This function is called
> + * for each reserved range and marks the pages PageReserved.
> + * When deferred initialization of struct pages is enabled it also ensures
> + * that struct pages are properly initialised.
> + */
> +static void __init memmap_init_reserved_range(phys_addr_t start,
> + phys_addr_t end, int nid)
> +{
> + unsigned long pfn;
> +
> + for_each_valid_pfn(pfn, PFN_DOWN(start), PFN_UP(end)) {
> + struct page *page = pfn_to_page(pfn);
> +
> + init_deferred_page(pfn, nid);
> +
> + /*
> + * no need for atomic set_bit because the struct
> + * page is not visible yet so nobody should
> + * access it yet.
> + */
> + __SetPageReserved(page);
> + }
> +}
> +
> static void __init memmap_init_reserved_pages(void)
> {
> struct memblock_region *region;
> @@ -2259,7 +2284,7 @@ static void __init memmap_init_reserved_pages(void)
> end = start + region->size;
>
> if (memblock_is_nomap(region))
> - reserve_bootmem_region(start, end, nid);
> + memmap_init_reserved_range(start, end, nid);
>
> memblock_set_node(start, region->size, &memblock.reserved, nid);
> }
> @@ -2284,7 +2309,7 @@ static void __init memmap_init_reserved_pages(void)
> if (!numa_valid_node(nid))
> nid = early_pfn_to_nid(PFN_DOWN(start));
>
> - reserve_bootmem_region(start, end, nid);
> + memmap_init_reserved_range(start, end, nid);
> }
> }
> }
> diff --git a/mm/mm_init.c b/mm/mm_init.c
> index df34797691bd..ea8d3de43470 100644
> --- a/mm/mm_init.c
> +++ b/mm/mm_init.c
> @@ -772,31 +772,6 @@ void __meminit init_deferred_page(unsigned long pfn, int nid)
> __init_deferred_page(pfn, nid);
> }
>
> -/*
> - * Initialised pages do not have PageReserved set. This function is
> - * called for each range allocated by the bootmem allocator and
> - * marks the pages PageReserved. The remaining valid pages are later
> - * sent to the buddy page allocator.
> - */
> -void __meminit reserve_bootmem_region(phys_addr_t start,
> - phys_addr_t end, int nid)
> -{
> - unsigned long pfn;
> -
> - for_each_valid_pfn(pfn, PFN_DOWN(start), PFN_UP(end)) {
> - struct page *page = pfn_to_page(pfn);
> -
> - __init_deferred_page(pfn, nid);
> -
> - /*
> - * no need for atomic set_bit because the struct
> - * page is not visible yet so nobody should
> - * access it yet.
> - */
> - __SetPageReserved(page);
> - }
> -}
> -
> /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
> static bool __meminit
> overlap_memmap_init(unsigned long zone, unsigned long *pfn)
> diff --git a/tools/include/linux/mm.h b/tools/include/linux/mm.h
> index 028f3faf46e7..74cbd51dbea2 100644
> --- a/tools/include/linux/mm.h
> +++ b/tools/include/linux/mm.h
> @@ -32,8 +32,6 @@ static inline phys_addr_t virt_to_phys(volatile void *address)
> return (phys_addr_t)address;
> }
>
> -void reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid);
> -
> static inline void totalram_pages_inc(void)
> {
> }
> diff --git a/tools/testing/memblock/internal.h b/tools/testing/memblock/internal.h
> index 009b97bbdd22..eb02d5771f4c 100644
> --- a/tools/testing/memblock/internal.h
> +++ b/tools/testing/memblock/internal.h
> @@ -29,4 +29,13 @@ static inline unsigned long free_reserved_area(void *start, void *end,
> return 0;
> }
>
> +#define for_each_valid_pfn(pfn, start_pfn, end_pfn) \
> + for ((pfn) = (start_pfn); (pfn) < (end_pfn); (pfn)++)
> +
> +static inline void init_deferred_page(unsigned long pfn, int nid)
> +{
> +}
> +
> +#define __SetPageReserved(p) ((void)(p))
> +
> #endif
> diff --git a/tools/testing/memblock/mmzone.c b/tools/testing/memblock/mmzone.c
> index d3d58851864e..e719450f81cb 100644
> --- a/tools/testing/memblock/mmzone.c
> +++ b/tools/testing/memblock/mmzone.c
> @@ -11,10 +11,6 @@ struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
> return NULL;
> }
>
> -void reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid)
> -{
> -}
> -
> void atomic_long_set(atomic_long_t *v, long i)
> {
> }
> --
> 2.53.0
>
FYI I saw the below when running make in tools/testing/memblock, doesn't look
related to this change but maybe something to address?
cc -I. -I../../include -Wall -O2 -fsanitize=address -fsanitize=undefined -D CONFIG_PHYS_ADDR_T_64BIT -c -o memblock.o memblock.c
memblock.c: In function ‘memblock_add_range.isra’:
memblock.c:710:17: warning: ‘end_rgn’ may be used uninitialized [-Wmaybe-uninitialized]
710 | memblock_merge_regions(type, start_rgn, end_rgn);
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
memblock.c:616:42: note: ‘end_rgn’ was declared here
616 | int idx, nr_new, start_rgn = -1, end_rgn;
| ^~~~~~~
^ permalink raw reply [flat|nested] 5+ messages in thread* Re: [PATCH v2] memblock: move reserve_bootmem_range() to memblock.c and make it static
2026-03-23 12:37 ` Lorenzo Stoakes (Oracle)
@ 2026-03-23 14:01 ` Mike Rapoport
0 siblings, 0 replies; 5+ messages in thread
From: Mike Rapoport @ 2026-03-23 14:01 UTC (permalink / raw)
To: Lorenzo Stoakes (Oracle)
Cc: Andrew Morton, David Hildenbrand, Kees Cook, Liam R. Howlett,
Michal Hocko, Suren Baghdasaryan, Vlastimil Babka, linux-kernel,
linux-mm
On Mon, Mar 23, 2026 at 12:37:51PM +0000, Lorenzo Stoakes (Oracle) wrote:
> On Mon, Mar 23, 2026 at 09:20:42AM +0200, Mike Rapoport wrote:
> > From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
> >
> > reserve_bootmem_region() is only called from
> > memmap_init_reserved_pages() and it was in mm/mm_init.c because of its
> > > dependencies on static init_deferred_page().
> >
> > Since init_deferred_page() is not static anymore, move
> > reserve_bootmem_region(), rename it to memmap_init_reserved_range() and
> > make it static.
> >
> > Update the comment describing it to better reflect what the function
> > does and drop bogus comment about reserved pages in free_bootmem_page().
> >
> > Update memblock test stubs to reflect the core changes.
> >
> > Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
>
> LGTM and passed local tests so:
>
> Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Thanks!
> > ---
>
> FYI I saw the below when running make in tools/testing/memblock, doesn't look
> related to this change but maybe something to address?
>
>
> cc -I. -I../../include -Wall -O2 -fsanitize=address -fsanitize=undefined -D CONFIG_PHYS_ADDR_T_64BIT -c -o memblock.o memblock.c
> memblock.c: In function ‘memblock_add_range.isra’:
> memblock.c:710:17: warning: ‘end_rgn’ may be used uninitialized [-Wmaybe-uninitialized]
> 710 | memblock_merge_regions(type, start_rgn, end_rgn);
> | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
> memblock.c:616:42: note: ‘end_rgn’ was declared here
> 616 | int idx, nr_new, start_rgn = -1, end_rgn;
> | ^~~~~~~
Oddly enough it does not warn in the kernel build, so I kept ignoring this :)
Should be fixed indeed.
--
Sincerely yours,
Mike.
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [PATCH v2] memblock: move reserve_bootmem_range() to memblock.c and make it static
2026-03-23 7:20 [PATCH v2] memblock: move reserve_bootmem_range() to memblock.c and make it static Mike Rapoport
2026-03-23 12:18 ` David Hildenbrand (Arm)
2026-03-23 12:37 ` Lorenzo Stoakes (Oracle)
@ 2026-03-24 15:50 ` Mike Rapoport
2 siblings, 0 replies; 5+ messages in thread
From: Mike Rapoport @ 2026-03-24 15:50 UTC (permalink / raw)
To: Andrew Morton, David Hildenbrand, Mike Rapoport
Cc: Kees Cook, Liam R. Howlett, Lorenzo Stoakes, Michal Hocko,
Suren Baghdasaryan, Vlastimil Babka, linux-kernel, linux-mm
On Mon, 23 Mar 2026 09:20:42 +0200, Mike Rapoport wrote:
> reserve_bootmem_region() is only called from
> memmap_init_reserved_pages() and it was in mm/mm_init.c because of its
> dependencies on static init_deferred_page().
>
> Since init_deferred_page() is not static anymore, move
> reserve_bootmem_region(), rename it to memmap_init_reserved_range() and
> make it static.
>
> [...]
Applied to for-next branch of memblock.git tree, thanks!
[1/1] memblock: move reserve_bootmem_range() to memblock.c and make it static
commit: 6e56179632b784109f11174d4c2bf9716fdb328f
tree: https://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock
branch: for-next
--
Sincerely yours,
Mike.
^ permalink raw reply [flat|nested] 5+ messages in thread
end of thread, other threads:[~2026-03-24 15:50 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-03-23 7:20 [PATCH v2] memblock: move reserve_bootmem_range() to memblock.c and make it static Mike Rapoport
2026-03-23 12:18 ` David Hildenbrand (Arm)
2026-03-23 12:37 ` Lorenzo Stoakes (Oracle)
2026-03-23 14:01 ` Mike Rapoport
2026-03-24 15:50 ` Mike Rapoport
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox