public inbox for linux-mm@kvack.org
 help / color / mirror / Atom feed
* [PATCH] memblock: move reserve_bootmem_region() to memblock.c and make it static
@ 2026-03-22 14:31 Mike Rapoport
  2026-03-22 16:38 ` Andrew Morton
  0 siblings, 1 reply; 2+ messages in thread
From: Mike Rapoport @ 2026-03-22 14:31 UTC (permalink / raw)
  To: Andrew Morton, David Hildenbrand
  Cc: Kees Cook, Liam R. Howlett, Lorenzo Stoakes, Michal Hocko,
	Mike Rapoport, Suren Baghdasaryan, Vlastimil Babka, linux-kernel,
	linux-mm

From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>

reserve_bootmem_region() is only called from memmap_init_reserved_pages()
and it was in mm/mm_init.c because of its dependencies on static
init_deferred_page().

Since init_deferred_page() is not static anymore, move
reserve_bootmem_region(), rename it to memmap_init_reserved_range() and
make it static.

Update the comment describing it to better reflect what the function
does and drop bogus comment about reserved pages in free_bootmem_page().

Update memblock test stubs to reflect the core changes.

Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
---
 include/linux/bootmem_info.h      |  4 ----
 include/linux/mm.h                |  3 ---
 mm/memblock.c                     | 29 +++++++++++++++++++++++++++--
 mm/mm_init.c                      | 25 -------------------------
 tools/include/linux/mm.h          |  2 --
 tools/testing/memblock/internal.h |  9 +++++++++
 tools/testing/memblock/mmzone.c   |  4 ----
 7 files changed, 36 insertions(+), 40 deletions(-)

diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h
index 4c506e76a808..492ceeb1cdf8 100644
--- a/include/linux/bootmem_info.h
+++ b/include/linux/bootmem_info.h
@@ -44,10 +44,6 @@ static inline void free_bootmem_page(struct page *page)
 {
 	enum bootmem_type type = bootmem_type(page);
 
-	/*
-	 * The reserve_bootmem_region sets the reserved flag on bootmem
-	 * pages.
-	 */
 	VM_BUG_ON_PAGE(page_ref_count(page) != 2, page);
 
 	if (type == SECTION_INFO || type == MIX_SECTION_INFO)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 70747b53c7da..51af53dfe884 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3737,9 +3737,6 @@ extern unsigned long free_reserved_area(void *start, void *end,
 
 extern void adjust_managed_page_count(struct page *page, long count);
 
-extern void reserve_bootmem_region(phys_addr_t start,
-				   phys_addr_t end, int nid);
-
 /* Free the reserved page into the buddy system, so it gets managed. */
 void free_reserved_page(struct page *page);
 
diff --git a/mm/memblock.c b/mm/memblock.c
index b3ddfdec7a80..17aa8661b84d 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -2240,6 +2240,31 @@ static unsigned long __init __free_memory_core(phys_addr_t start,
 	return end_pfn - start_pfn;
 }
 
+/*
+ * Initialised pages do not have PageReserved set. This function is called
+ * for each reserved range and marks the pages PageReserved.
+ * When deferred initialization of struct pages is enabled it also ensures
+ * that struct pages are properly initialised.
+ */
+static void __init memmap_init_reserved_range(phys_addr_t start,
+					      phys_addr_t end, int nid)
+{
+	unsigned long pfn;
+
+	for_each_valid_pfn(pfn, PFN_DOWN(start), PFN_UP(end)) {
+		struct page *page = pfn_to_page(pfn);
+
+		init_deferred_page(pfn, nid);
+
+		/*
+		 * no need for atomic set_bit because the struct
+		 * page is not visible yet so nobody should
+		 * access it yet.
+		 */
+		__SetPageReserved(page);
+	}
+}
+
 static void __init memmap_init_reserved_pages(void)
 {
 	struct memblock_region *region;
@@ -2259,7 +2284,7 @@ static void __init memmap_init_reserved_pages(void)
 		end = start + region->size;
 
 		if (memblock_is_nomap(region))
-			reserve_bootmem_region(start, end, nid);
+			memmap_init_reserved_range(start, end, nid);
 
 		memblock_set_node(start, region->size, &memblock.reserved, nid);
 	}
@@ -2284,7 +2309,7 @@ static void __init memmap_init_reserved_pages(void)
 			if (!numa_valid_node(nid))
 				nid = early_pfn_to_nid(PFN_DOWN(start));
 
-			reserve_bootmem_region(start, end, nid);
+			memmap_init_reserved_range(start, end, nid);
 		}
 	}
 }
diff --git a/mm/mm_init.c b/mm/mm_init.c
index cec7bb758bdd..96ae6024a75f 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -784,31 +784,6 @@ void __meminit init_deferred_page(unsigned long pfn, int nid)
 	__init_deferred_page(pfn, nid);
 }
 
-/*
- * Initialised pages do not have PageReserved set. This function is
- * called for each range allocated by the bootmem allocator and
- * marks the pages PageReserved. The remaining valid pages are later
- * sent to the buddy page allocator.
- */
-void __meminit reserve_bootmem_region(phys_addr_t start,
-				      phys_addr_t end, int nid)
-{
-	unsigned long pfn;
-
-	for_each_valid_pfn(pfn, PFN_DOWN(start), PFN_UP(end)) {
-		struct page *page = pfn_to_page(pfn);
-
-		__init_deferred_page(pfn, nid);
-
-		/*
-		 * no need for atomic set_bit because the struct
-		 * page is not visible yet so nobody should
-		 * access it yet.
-		 */
-		__SetPageReserved(page);
-	}
-}
-
 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
 static bool __meminit
 overlap_memmap_init(unsigned long zone, unsigned long *pfn)
diff --git a/tools/include/linux/mm.h b/tools/include/linux/mm.h
index 028f3faf46e7..74cbd51dbea2 100644
--- a/tools/include/linux/mm.h
+++ b/tools/include/linux/mm.h
@@ -32,8 +32,6 @@ static inline phys_addr_t virt_to_phys(volatile void *address)
 	return (phys_addr_t)address;
 }
 
-void reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid);
-
 static inline void totalram_pages_inc(void)
 {
 }
diff --git a/tools/testing/memblock/internal.h b/tools/testing/memblock/internal.h
index 009b97bbdd22..eb02d5771f4c 100644
--- a/tools/testing/memblock/internal.h
+++ b/tools/testing/memblock/internal.h
@@ -29,4 +29,13 @@ static inline unsigned long free_reserved_area(void *start, void *end,
 	return 0;
 }
 
+#define for_each_valid_pfn(pfn, start_pfn, end_pfn)                     \
+       for ((pfn) = (start_pfn); (pfn) < (end_pfn); (pfn)++)
+
+static inline void init_deferred_page(unsigned long pfn, int nid)
+{
+}
+
+#define __SetPageReserved(p)	((void)(p))
+
 #endif
diff --git a/tools/testing/memblock/mmzone.c b/tools/testing/memblock/mmzone.c
index d3d58851864e..e719450f81cb 100644
--- a/tools/testing/memblock/mmzone.c
+++ b/tools/testing/memblock/mmzone.c
@@ -11,10 +11,6 @@ struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
 	return NULL;
 }
 
-void reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid)
-{
-}
-
 void atomic_long_set(atomic_long_t *v, long i)
 {
 }
-- 
2.53.0



^ permalink raw reply related	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2026-03-22 16:38 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-03-22 14:31 [PATCH] memblock: move reserve_bootmem_region() to memblock.c and make it static Mike Rapoport
2026-03-22 16:38 ` Andrew Morton

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox