From: "David Hildenbrand (Arm)" <david@kernel.org>
To: linux-kernel@vger.kernel.org
Cc: Andrew Morton <akpm@linux-foundation.org>,
Oscar Salvador <osalvador@suse.de>,
Axel Rasmussen <axelrasmussen@google.com>,
Yuanchu Xie <yuanchu@google.com>, Wei Xu <weixugc@google.com>,
Lorenzo Stoakes <ljs@kernel.org>,
"Liam R. Howlett" <Liam.Howlett@oracle.com>,
Vlastimil Babka <vbabka@kernel.org>,
Mike Rapoport <rppt@kernel.org>,
Suren Baghdasaryan <surenb@google.com>,
Michal Hocko <mhocko@suse.com>,
Sidhartha Kumar <sidhartha.kumar@oracle.com>,
linux-mm@kvack.org, linux-cxl@vger.kernel.org,
linux-riscv@lists.infradead.org,
"David Hildenbrand (Arm)" <david@kernel.org>
Subject: [PATCH v2 15/15] mm/sparse: move memory hotplug bits to sparse-vmemmap.c
Date: Fri, 20 Mar 2026 23:13:47 +0100
Message-ID: <20260320-sparsemem_cleanups-v2-15-096addc8800d@kernel.org>
In-Reply-To: <20260320-sparsemem_cleanups-v2-0-096addc8800d@kernel.org>

Let's move all memory hotplug-related code to sparse-vmemmap.c. We only
have to expose sparse_index_init(). While at it, drop the definition of
sparse_index_init() for !CONFIG_SPARSEMEM, which is unused, and place
the declaration in internal.h.

Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Signed-off-by: David Hildenbrand (Arm) <david@kernel.org>
---
include/linux/mmzone.h | 1 -
mm/internal.h | 4 +
mm/sparse-vmemmap.c | 304 ++++++++++++++++++++++++++++++++++++++++++++++++
mm/sparse.c | 310 +------------------------------------------------
4 files changed, 310 insertions(+), 309 deletions(-)
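
As a quick orientation for reviewers (a sketch only, not part of the
patch; it just restates identifiers from the hunks below): after this
change, mm/sparse-vmemmap.c owns the hotplug entry points and reaches
back into mm/sparse.c solely through sparse_index_init(), which is now
declared in mm/internal.h:

	/* mm/internal.h */
	#ifdef CONFIG_SPARSEMEM
	int sparse_index_init(unsigned long section_nr, int nid);
	#endif /* CONFIG_SPARSEMEM */

	/* mm/sparse-vmemmap.c -- hotplug entry point after the move */
	int __meminit sparse_add_section(int nid, unsigned long start_pfn,
			unsigned long nr_pages, struct vmem_altmap *altmap,
			struct dev_pagemap *pgmap)
	{
		/* Only remaining call back into mm/sparse.c. */
		int ret = sparse_index_init(pfn_to_section_nr(start_pfn), nid);

		if (ret < 0)
			return ret;
		/* section_activate(), subsection map handling, ... */
	}
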
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index dcbbf36ed88c..e11513f581eb 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -2390,7 +2390,6 @@ static inline unsigned long next_present_section_nr(unsigned long section_nr)
#endif
#else
-#define sparse_index_init(_sec, _nid) do {} while (0)
#define sparse_vmemmap_init_nid_early(_nid) do {} while (0)
#define sparse_vmemmap_init_nid_late(_nid) do {} while (0)
#define pfn_in_present_section pfn_valid
diff --git a/mm/internal.h b/mm/internal.h
index b002c91e40a5..83e781147a28 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -965,6 +965,7 @@ void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
*/
#ifdef CONFIG_SPARSEMEM
void sparse_init(void);
+int sparse_index_init(unsigned long section_nr, int nid);
static inline void sparse_init_one_section(struct mem_section *ms,
unsigned long pnum, struct page *mem_map,
@@ -1000,6 +1001,9 @@ static inline void __section_mark_present(struct mem_section *ms,
static inline void sparse_init(void) {}
#endif /* CONFIG_SPARSEMEM */
+/*
+ * mm/sparse-vmemmap.c
+ */
#ifdef CONFIG_SPARSEMEM_VMEMMAP
void sparse_init_subsection_map(unsigned long pfn, unsigned long nr_pages);
#else
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index f0690797667f..08fef7b5c8b0 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -591,3 +591,307 @@ void __init sparse_vmemmap_init_nid_late(int nid)
hugetlb_vmemmap_init_late(nid);
}
#endif
+
+static void subsection_mask_set(unsigned long *map, unsigned long pfn,
+ unsigned long nr_pages)
+{
+ int idx = subsection_map_index(pfn);
+ int end = subsection_map_index(pfn + nr_pages - 1);
+
+ bitmap_set(map, idx, end - idx + 1);
+}
+
+void __init sparse_init_subsection_map(unsigned long pfn, unsigned long nr_pages)
+{
+ int end_sec_nr = pfn_to_section_nr(pfn + nr_pages - 1);
+ unsigned long nr, start_sec_nr = pfn_to_section_nr(pfn);
+
+ for (nr = start_sec_nr; nr <= end_sec_nr; nr++) {
+ struct mem_section *ms;
+ unsigned long pfns;
+
+ pfns = min(nr_pages, PAGES_PER_SECTION
+ - (pfn & ~PAGE_SECTION_MASK));
+ ms = __nr_to_section(nr);
+ subsection_mask_set(ms->usage->subsection_map, pfn, pfns);
+
+ pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,
+ pfns, subsection_map_index(pfn),
+ subsection_map_index(pfn + pfns - 1));
+
+ pfn += pfns;
+ nr_pages -= pfns;
+ }
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+
+/* Mark all memory sections within the pfn range as online */
+void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
+{
+ unsigned long pfn;
+
+ for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+ unsigned long section_nr = pfn_to_section_nr(pfn);
+ struct mem_section *ms = __nr_to_section(section_nr);
+
+ ms->section_mem_map |= SECTION_IS_ONLINE;
+ }
+}
+
+/* Mark all memory sections within the pfn range as offline */
+void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
+{
+ unsigned long pfn;
+
+ for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+ unsigned long section_nr = pfn_to_section_nr(pfn);
+ struct mem_section *ms = __nr_to_section(section_nr);
+
+ ms->section_mem_map &= ~SECTION_IS_ONLINE;
+ }
+}
+
+static struct page * __meminit populate_section_memmap(unsigned long pfn,
+ unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
+ struct dev_pagemap *pgmap)
+{
+ return __populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
+}
+
+static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
+ struct vmem_altmap *altmap)
+{
+ unsigned long start = (unsigned long) pfn_to_page(pfn);
+ unsigned long end = start + nr_pages * sizeof(struct page);
+
+ vmemmap_free(start, end, altmap);
+}
+static void free_map_bootmem(struct page *memmap)
+{
+ unsigned long start = (unsigned long)memmap;
+ unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
+
+ vmemmap_free(start, end, NULL);
+}
+
+static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
+{
+ DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
+ DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
+ struct mem_section *ms = __pfn_to_section(pfn);
+ unsigned long *subsection_map = ms->usage
+ ? &ms->usage->subsection_map[0] : NULL;
+
+ subsection_mask_set(map, pfn, nr_pages);
+ if (subsection_map)
+ bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);
+
+ if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
+ "section already deactivated (%#lx + %ld)\n",
+ pfn, nr_pages))
+ return -EINVAL;
+
+ bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
+ return 0;
+}
+
+static bool is_subsection_map_empty(struct mem_section *ms)
+{
+ return bitmap_empty(&ms->usage->subsection_map[0],
+ SUBSECTIONS_PER_SECTION);
+}
+
+static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
+{
+ struct mem_section *ms = __pfn_to_section(pfn);
+ DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
+ unsigned long *subsection_map;
+ int rc = 0;
+
+ subsection_mask_set(map, pfn, nr_pages);
+
+ subsection_map = &ms->usage->subsection_map[0];
+
+ if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
+ rc = -EINVAL;
+ else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
+ rc = -EEXIST;
+ else
+ bitmap_or(subsection_map, map, subsection_map,
+ SUBSECTIONS_PER_SECTION);
+
+ return rc;
+}
+
+/*
+ * To deactivate a memory region, there are 3 cases to handle:
+ *
+ * 1. deactivation of a partial hot-added section:
+ * a) section was present at memory init.
+ * b) section was hot-added post memory init.
+ * 2. deactivation of a complete hot-added section.
+ * 3. deactivation of a complete section from memory init.
+ *
+ * For 1, when subsection_map does not empty we will not be freeing the
+ * usage map, but still need to free the vmemmap range.
+ */
+static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
+ struct vmem_altmap *altmap)
+{
+ struct mem_section *ms = __pfn_to_section(pfn);
+ bool section_is_early = early_section(ms);
+ struct page *memmap = NULL;
+ bool empty;
+
+ if (clear_subsection_map(pfn, nr_pages))
+ return;
+
+ empty = is_subsection_map_empty(ms);
+ if (empty) {
+ /*
+ * Mark the section invalid so that valid_section()
+ * return false. This prevents code from dereferencing
+ * ms->usage array.
+ */
+ ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
+
+ /*
+ * When removing an early section, the usage map is kept (as the
+ * usage maps of other sections fall into the same page). It
+ * will be re-used when re-adding the section - which is then no
+ * longer an early section. If the usage map is PageReserved, it
+ * was allocated during boot.
+ */
+ if (!PageReserved(virt_to_page(ms->usage))) {
+ kfree_rcu(ms->usage, rcu);
+ WRITE_ONCE(ms->usage, NULL);
+ }
+ memmap = pfn_to_page(SECTION_ALIGN_DOWN(pfn));
+ }
+
+ /*
+ * The memmap of early sections is always fully populated. See
+ * section_activate() and pfn_valid() .
+ */
+ if (!section_is_early) {
+ memmap_pages_add(-1L * (DIV_ROUND_UP(nr_pages * sizeof(struct page), PAGE_SIZE)));
+ depopulate_section_memmap(pfn, nr_pages, altmap);
+ } else if (memmap) {
+ memmap_boot_pages_add(-1L * (DIV_ROUND_UP(nr_pages * sizeof(struct page),
+ PAGE_SIZE)));
+ free_map_bootmem(memmap);
+ }
+
+ if (empty)
+ ms->section_mem_map = (unsigned long)NULL;
+}
+
+static struct page * __meminit section_activate(int nid, unsigned long pfn,
+ unsigned long nr_pages, struct vmem_altmap *altmap,
+ struct dev_pagemap *pgmap)
+{
+ struct mem_section *ms = __pfn_to_section(pfn);
+ struct mem_section_usage *usage = NULL;
+ struct page *memmap;
+ int rc;
+
+ if (!ms->usage) {
+ usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
+ if (!usage)
+ return ERR_PTR(-ENOMEM);
+ ms->usage = usage;
+ }
+
+ rc = fill_subsection_map(pfn, nr_pages);
+ if (rc) {
+ if (usage)
+ ms->usage = NULL;
+ kfree(usage);
+ return ERR_PTR(rc);
+ }
+
+ /*
+ * The early init code does not consider partially populated
+ * initial sections, it simply assumes that memory will never be
+ * referenced. If we hot-add memory into such a section then we
+ * do not need to populate the memmap and can simply reuse what
+ * is already there.
+ */
+ if (nr_pages < PAGES_PER_SECTION && early_section(ms))
+ return pfn_to_page(pfn);
+
+ memmap = populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
+ if (!memmap) {
+ section_deactivate(pfn, nr_pages, altmap);
+ return ERR_PTR(-ENOMEM);
+ }
+ memmap_pages_add(DIV_ROUND_UP(nr_pages * sizeof(struct page), PAGE_SIZE));
+
+ return memmap;
+}
+
+/**
+ * sparse_add_section - add a memory section, or populate an existing one
+ * @nid: The node to add section on
+ * @start_pfn: start pfn of the memory range
+ * @nr_pages: number of pfns to add in the section
+ * @altmap: alternate pfns to allocate the memmap backing store
+ * @pgmap: alternate compound page geometry for devmap mappings
+ *
+ * This is only intended for hotplug.
+ *
+ * Note that only VMEMMAP supports sub-section aligned hotplug,
+ * the proper alignment and size are gated by check_pfn_span().
+ *
+ *
+ * Return:
+ * * 0 - On success.
+ * * -EEXIST - Section has been present.
+ * * -ENOMEM - Out of memory.
+ */
+int __meminit sparse_add_section(int nid, unsigned long start_pfn,
+ unsigned long nr_pages, struct vmem_altmap *altmap,
+ struct dev_pagemap *pgmap)
+{
+ unsigned long section_nr = pfn_to_section_nr(start_pfn);
+ struct mem_section *ms;
+ struct page *memmap;
+ int ret;
+
+ ret = sparse_index_init(section_nr, nid);
+ if (ret < 0)
+ return ret;
+
+ memmap = section_activate(nid, start_pfn, nr_pages, altmap, pgmap);
+ if (IS_ERR(memmap))
+ return PTR_ERR(memmap);
+
+ /*
+ * Poison uninitialized struct pages in order to catch invalid flags
+ * combinations.
+ */
+ page_init_poison(memmap, sizeof(struct page) * nr_pages);
+
+ ms = __nr_to_section(section_nr);
+ __section_mark_present(ms, section_nr);
+
+ /* Align memmap to section boundary in the subsection case */
+ if (section_nr_to_pfn(section_nr) != start_pfn)
+ memmap = pfn_to_page(section_nr_to_pfn(section_nr));
+ sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);
+
+ return 0;
+}
+
+void sparse_remove_section(unsigned long pfn, unsigned long nr_pages,
+ struct vmem_altmap *altmap)
+{
+ struct mem_section *ms = __pfn_to_section(pfn);
+
+ if (WARN_ON_ONCE(!valid_section(ms)))
+ return;
+
+ section_deactivate(pfn, nr_pages, altmap);
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/mm/sparse.c b/mm/sparse.c
index ecd4c41c0ff0..007fd52c621e 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -79,7 +79,7 @@ static noinline struct mem_section __ref *sparse_index_alloc(int nid)
return section;
}
-static int __meminit sparse_index_init(unsigned long section_nr, int nid)
+int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
unsigned long root = SECTION_NR_TO_ROOT(section_nr);
struct mem_section *section;
@@ -103,7 +103,7 @@ static int __meminit sparse_index_init(unsigned long section_nr, int nid)
return 0;
}
#else /* !SPARSEMEM_EXTREME */
-static inline int sparse_index_init(unsigned long section_nr, int nid)
+int sparse_index_init(unsigned long section_nr, int nid)
{
return 0;
}
@@ -167,40 +167,6 @@ static inline unsigned long first_present_section_nr(void)
return next_present_section_nr(-1);
}
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
-static void subsection_mask_set(unsigned long *map, unsigned long pfn,
- unsigned long nr_pages)
-{
- int idx = subsection_map_index(pfn);
- int end = subsection_map_index(pfn + nr_pages - 1);
-
- bitmap_set(map, idx, end - idx + 1);
-}
-
-void __init sparse_init_subsection_map(unsigned long pfn, unsigned long nr_pages)
-{
- int end_sec_nr = pfn_to_section_nr(pfn + nr_pages - 1);
- unsigned long nr, start_sec_nr = pfn_to_section_nr(pfn);
-
- for (nr = start_sec_nr; nr <= end_sec_nr; nr++) {
- struct mem_section *ms;
- unsigned long pfns;
-
- pfns = min(nr_pages, PAGES_PER_SECTION
- - (pfn & ~PAGE_SECTION_MASK));
- ms = __nr_to_section(nr);
- subsection_mask_set(ms->usage->subsection_map, pfn, pfns);
-
- pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,
- pfns, subsection_map_index(pfn),
- subsection_map_index(pfn + pfns - 1));
-
- pfn += pfns;
- nr_pages -= pfns;
- }
-}
-#endif
-
/* Record a memory area against a node. */
static void __init memory_present(int nid, unsigned long start, unsigned long end)
{
@@ -482,275 +448,3 @@ void __init sparse_init(void)
sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
vmemmap_populate_print_last();
}
-
-#ifdef CONFIG_MEMORY_HOTPLUG
-
-/* Mark all memory sections within the pfn range as online */
-void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
-{
- unsigned long pfn;
-
- for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
- unsigned long section_nr = pfn_to_section_nr(pfn);
- struct mem_section *ms = __nr_to_section(section_nr);
-
- ms->section_mem_map |= SECTION_IS_ONLINE;
- }
-}
-
-/* Mark all memory sections within the pfn range as offline */
-void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
-{
- unsigned long pfn;
-
- for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
- unsigned long section_nr = pfn_to_section_nr(pfn);
- struct mem_section *ms = __nr_to_section(section_nr);
-
- ms->section_mem_map &= ~SECTION_IS_ONLINE;
- }
-}
-
-static struct page * __meminit populate_section_memmap(unsigned long pfn,
- unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
- struct dev_pagemap *pgmap)
-{
- return __populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
-}
-
-static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
- struct vmem_altmap *altmap)
-{
- unsigned long start = (unsigned long) pfn_to_page(pfn);
- unsigned long end = start + nr_pages * sizeof(struct page);
-
- vmemmap_free(start, end, altmap);
-}
-static void free_map_bootmem(struct page *memmap)
-{
- unsigned long start = (unsigned long)memmap;
- unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
-
- vmemmap_free(start, end, NULL);
-}
-
-static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
-{
- DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
- DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
- struct mem_section *ms = __pfn_to_section(pfn);
- unsigned long *subsection_map = ms->usage
- ? &ms->usage->subsection_map[0] : NULL;
-
- subsection_mask_set(map, pfn, nr_pages);
- if (subsection_map)
- bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);
-
- if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
- "section already deactivated (%#lx + %ld)\n",
- pfn, nr_pages))
- return -EINVAL;
-
- bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
- return 0;
-}
-
-static bool is_subsection_map_empty(struct mem_section *ms)
-{
- return bitmap_empty(&ms->usage->subsection_map[0],
- SUBSECTIONS_PER_SECTION);
-}
-
-static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
-{
- struct mem_section *ms = __pfn_to_section(pfn);
- DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
- unsigned long *subsection_map;
- int rc = 0;
-
- subsection_mask_set(map, pfn, nr_pages);
-
- subsection_map = &ms->usage->subsection_map[0];
-
- if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
- rc = -EINVAL;
- else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
- rc = -EEXIST;
- else
- bitmap_or(subsection_map, map, subsection_map,
- SUBSECTIONS_PER_SECTION);
-
- return rc;
-}
-
-/*
- * To deactivate a memory region, there are 3 cases to handle:
- *
- * 1. deactivation of a partial hot-added section:
- * a) section was present at memory init.
- * b) section was hot-added post memory init.
- * 2. deactivation of a complete hot-added section.
- * 3. deactivation of a complete section from memory init.
- *
- * For 1, when subsection_map does not empty we will not be freeing the
- * usage map, but still need to free the vmemmap range.
- */
-static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
- struct vmem_altmap *altmap)
-{
- struct mem_section *ms = __pfn_to_section(pfn);
- bool section_is_early = early_section(ms);
- struct page *memmap = NULL;
- bool empty;
-
- if (clear_subsection_map(pfn, nr_pages))
- return;
-
- empty = is_subsection_map_empty(ms);
- if (empty) {
- /*
- * Mark the section invalid so that valid_section()
- * return false. This prevents code from dereferencing
- * ms->usage array.
- */
- ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
-
- /*
- * When removing an early section, the usage map is kept (as the
- * usage maps of other sections fall into the same page). It
- * will be re-used when re-adding the section - which is then no
- * longer an early section. If the usage map is PageReserved, it
- * was allocated during boot.
- */
- if (!PageReserved(virt_to_page(ms->usage))) {
- kfree_rcu(ms->usage, rcu);
- WRITE_ONCE(ms->usage, NULL);
- }
- memmap = pfn_to_page(SECTION_ALIGN_DOWN(pfn));
- }
-
- /*
- * The memmap of early sections is always fully populated. See
- * section_activate() and pfn_valid() .
- */
- if (!section_is_early) {
- memmap_pages_add(-1L * (DIV_ROUND_UP(nr_pages * sizeof(struct page), PAGE_SIZE)));
- depopulate_section_memmap(pfn, nr_pages, altmap);
- } else if (memmap) {
- memmap_boot_pages_add(-1L * (DIV_ROUND_UP(nr_pages * sizeof(struct page),
- PAGE_SIZE)));
- free_map_bootmem(memmap);
- }
-
- if (empty)
- ms->section_mem_map = (unsigned long)NULL;
-}
-
-static struct page * __meminit section_activate(int nid, unsigned long pfn,
- unsigned long nr_pages, struct vmem_altmap *altmap,
- struct dev_pagemap *pgmap)
-{
- struct mem_section *ms = __pfn_to_section(pfn);
- struct mem_section_usage *usage = NULL;
- struct page *memmap;
- int rc;
-
- if (!ms->usage) {
- usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
- if (!usage)
- return ERR_PTR(-ENOMEM);
- ms->usage = usage;
- }
-
- rc = fill_subsection_map(pfn, nr_pages);
- if (rc) {
- if (usage)
- ms->usage = NULL;
- kfree(usage);
- return ERR_PTR(rc);
- }
-
- /*
- * The early init code does not consider partially populated
- * initial sections, it simply assumes that memory will never be
- * referenced. If we hot-add memory into such a section then we
- * do not need to populate the memmap and can simply reuse what
- * is already there.
- */
- if (nr_pages < PAGES_PER_SECTION && early_section(ms))
- return pfn_to_page(pfn);
-
- memmap = populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
- if (!memmap) {
- section_deactivate(pfn, nr_pages, altmap);
- return ERR_PTR(-ENOMEM);
- }
- memmap_pages_add(DIV_ROUND_UP(nr_pages * sizeof(struct page), PAGE_SIZE));
-
- return memmap;
-}
-
-/**
- * sparse_add_section - add a memory section, or populate an existing one
- * @nid: The node to add section on
- * @start_pfn: start pfn of the memory range
- * @nr_pages: number of pfns to add in the section
- * @altmap: alternate pfns to allocate the memmap backing store
- * @pgmap: alternate compound page geometry for devmap mappings
- *
- * This is only intended for hotplug.
- *
- * Note that only VMEMMAP supports sub-section aligned hotplug,
- * the proper alignment and size are gated by check_pfn_span().
- *
- *
- * Return:
- * * 0 - On success.
- * * -EEXIST - Section has been present.
- * * -ENOMEM - Out of memory.
- */
-int __meminit sparse_add_section(int nid, unsigned long start_pfn,
- unsigned long nr_pages, struct vmem_altmap *altmap,
- struct dev_pagemap *pgmap)
-{
- unsigned long section_nr = pfn_to_section_nr(start_pfn);
- struct mem_section *ms;
- struct page *memmap;
- int ret;
-
- ret = sparse_index_init(section_nr, nid);
- if (ret < 0)
- return ret;
-
- memmap = section_activate(nid, start_pfn, nr_pages, altmap, pgmap);
- if (IS_ERR(memmap))
- return PTR_ERR(memmap);
-
- /*
- * Poison uninitialized struct pages in order to catch invalid flags
- * combinations.
- */
- page_init_poison(memmap, sizeof(struct page) * nr_pages);
-
- ms = __nr_to_section(section_nr);
- __section_mark_present(ms, section_nr);
-
- /* Align memmap to section boundary in the subsection case */
- if (section_nr_to_pfn(section_nr) != start_pfn)
- memmap = pfn_to_page(section_nr_to_pfn(section_nr));
- sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);
-
- return 0;
-}
-
-void sparse_remove_section(unsigned long pfn, unsigned long nr_pages,
- struct vmem_altmap *altmap)
-{
- struct mem_section *ms = __pfn_to_section(pfn);
-
- if (WARN_ON_ONCE(!valid_section(ms)))
- return;
-
- section_deactivate(pfn, nr_pages, altmap);
-}
-#endif /* CONFIG_MEMORY_HOTPLUG */
--
2.43.0