From: "David Hildenbrand (Arm)" <david@kernel.org>
To: linux-kernel@vger.kernel.org
Cc: linux-mm@kvack.org, linux-cxl@vger.kernel.org,
"David Hildenbrand (Arm)" <david@kernel.org>,
Andrew Morton <akpm@linux-foundation.org>,
Oscar Salvador <osalvador@suse.de>,
Axel Rasmussen <axelrasmussen@google.com>,
Yuanchu Xie <yuanchu@google.com>, Wei Xu <weixugc@google.com>,
Lorenzo Stoakes <ljs@kernel.org>,
"Liam R. Howlett" <Liam.Howlett@oracle.com>,
Vlastimil Babka <vbabka@kernel.org>,
Mike Rapoport <rppt@kernel.org>,
Suren Baghdasaryan <surenb@google.com>,
Michal Hocko <mhocko@suse.com>
Subject: [PATCH 14/14] mm/sparse: move memory hotplug bits to sparse-vmemmap.c
Date: Tue, 17 Mar 2026 17:56:52 +0100
Message-ID: <20260317165652.99114-15-david@kernel.org>
In-Reply-To: <20260317165652.99114-1-david@kernel.org>

Let's move all memory hotplug related code to sparse-vmemmap.c.

We only have to expose sparse_index_init(). While at it, drop the stub
definition of sparse_index_init() for !CONFIG_SPARSEMEM, which is unused,
and place the declaration in mm/internal.h.

Signed-off-by: David Hildenbrand (Arm) <david@kernel.org>
---
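A note on the subsection bitmap handling that moves below: it is easiest
to review with the index math in mind. Here is a minimal, illustrative
userspace sketch of subsection_map_index()/subsection_mask_set(); the
constants assume a common x86-64 layout (128 MiB sections, 2 MiB
subsections, 4 KiB pages) and are stand-ins for the demo, not taken from
this patch:

#include <stdio.h>

#define PAGES_PER_SECTION	(1UL << 15)	/* 128 MiB in 4 KiB pages */
#define PAGES_PER_SUBSECTION	(1UL << 9)	/*   2 MiB in 4 KiB pages */
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION - 1))

static unsigned long subsection_map_index(unsigned long pfn)
{
	/* offset within the section, in units of whole subsections */
	return (pfn & ~PAGE_SECTION_MASK) / PAGES_PER_SUBSECTION;
}

int main(void)
{
	unsigned long pfn = 0x10200;	/* arbitrary, subsection-aligned */
	unsigned long nr_pages = 1024;	/* exactly two subsections */

	/* the bounds that subsection_mask_set() feeds to bitmap_set() */
	printf("first bit: %lu\n", subsection_map_index(pfn));
	printf("last  bit: %lu\n", subsection_map_index(pfn + nr_pages - 1));
	return 0;
}

With these numbers the demo prints 1 and 2, i.e. subsection_mask_set()
would do bitmap_set(map, 1, 2): two subsection bits within one section.
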
include/linux/mmzone.h | 1 -
mm/internal.h | 4 +
mm/sparse-vmemmap.c | 308 ++++++++++++++++++++++++++++++++++++++++
mm/sparse.c | 314 +----------------------------------------
4 files changed, 314 insertions(+), 313 deletions(-)
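
For context, roughly how the hotplug core consumes the interface moved
here: a simplified sketch modeled on __add_pages() in mm/memory_hotplug.c
(the name add_pages_sketch and the elided details are mine; not part of
this patch):

static int add_pages_sketch(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	unsigned long end = pfn + nr_pages;
	int err = 0;

	while (pfn < end) {
		/* never cross a section boundary in a single call */
		unsigned long pfns = min(end - pfn,
				PAGES_PER_SECTION - (pfn & ~PAGE_SECTION_MASK));

		err = sparse_add_section(nid, pfn, pfns, altmap, pgmap);
		if (err)	/* e.g. -EEXIST or -ENOMEM, see the kerneldoc */
			break;
		pfn += pfns;
	}
	return err;
}

The per-call clamping is why sparse_add_section() only ever sees a range
within a single section, and why section_activate() can reason about one
mem_section at a time.
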
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index dcbbf36ed88c..e11513f581eb 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -2390,7 +2390,6 @@ static inline unsigned long next_present_section_nr(unsigned long section_nr)
#endif
#else
-#define sparse_index_init(_sec, _nid) do {} while (0)
#define sparse_vmemmap_init_nid_early(_nid) do {} while (0)
#define sparse_vmemmap_init_nid_late(_nid) do {} while (0)
#define pfn_in_present_section pfn_valid
diff --git a/mm/internal.h b/mm/internal.h
index 835a6f00134e..b1a9e9312ffe 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -965,6 +965,7 @@ void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
*/
#ifdef CONFIG_SPARSEMEM
void sparse_init(void);
+int sparse_index_init(unsigned long section_nr, int nid);
static inline void sparse_init_one_section(struct mem_section *ms,
unsigned long pnum, struct page *mem_map,
@@ -1000,6 +1001,9 @@ static inline void __section_mark_present(struct mem_section *ms,
static inline void sparse_init(void) {}
#endif /* CONFIG_SPARSEMEM */
+/*
+ * mm/sparse-vmemmap.c
+ */
#ifdef CONFIG_SPARSEMEM_VMEMMAP
void sparse_init_subsection_map(unsigned long pfn, unsigned long nr_pages);
#else
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index f0690797667f..330579365a0f 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -591,3 +591,311 @@ void __init sparse_vmemmap_init_nid_late(int nid)
hugetlb_vmemmap_init_late(nid);
}
#endif
+
+static void subsection_mask_set(unsigned long *map, unsigned long pfn,
+ unsigned long nr_pages)
+{
+ int idx = subsection_map_index(pfn);
+ int end = subsection_map_index(pfn + nr_pages - 1);
+
+ bitmap_set(map, idx, end - idx + 1);
+}
+
+void __init sparse_init_subsection_map(unsigned long pfn, unsigned long nr_pages)
+{
+ int end_sec_nr = pfn_to_section_nr(pfn + nr_pages - 1);
+ unsigned long nr, start_sec_nr = pfn_to_section_nr(pfn);
+
+ for (nr = start_sec_nr; nr <= end_sec_nr; nr++) {
+ struct mem_section *ms;
+ unsigned long pfns;
+
+ pfns = min(nr_pages, PAGES_PER_SECTION
+ - (pfn & ~PAGE_SECTION_MASK));
+ ms = __nr_to_section(nr);
+ subsection_mask_set(ms->usage->subsection_map, pfn, pfns);
+
+ pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,
+ pfns, subsection_map_index(pfn),
+ subsection_map_index(pfn + pfns - 1));
+
+ pfn += pfns;
+ nr_pages -= pfns;
+ }
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+
+/* Mark all memory sections within the pfn range as online */
+void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
+{
+ unsigned long pfn;
+
+ for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+ unsigned long section_nr = pfn_to_section_nr(pfn);
+ struct mem_section *ms = __nr_to_section(section_nr);
+
+ ms->section_mem_map |= SECTION_IS_ONLINE;
+ }
+}
+
+/* Mark all memory sections within the pfn range as offline */
+void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
+{
+ unsigned long pfn;
+
+ for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+ unsigned long section_nr = pfn_to_section_nr(pfn);
+ struct mem_section *ms = __nr_to_section(section_nr);
+
+ ms->section_mem_map &= ~SECTION_IS_ONLINE;
+ }
+}
+
+static struct page * __meminit populate_section_memmap(unsigned long pfn,
+ unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
+ struct dev_pagemap *pgmap)
+{
+ return __populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
+}
+
+static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
+ struct vmem_altmap *altmap)
+{
+ unsigned long start = (unsigned long) pfn_to_page(pfn);
+ unsigned long end = start + nr_pages * sizeof(struct page);
+
+ vmemmap_free(start, end, altmap);
+}
+static void free_map_bootmem(struct page *memmap)
+{
+ unsigned long start = (unsigned long)memmap;
+ unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
+
+ vmemmap_free(start, end, NULL);
+}
+
+static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
+{
+ DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
+ DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
+ struct mem_section *ms = __pfn_to_section(pfn);
+ unsigned long *subsection_map = ms->usage
+ ? &ms->usage->subsection_map[0] : NULL;
+
+ subsection_mask_set(map, pfn, nr_pages);
+ if (subsection_map)
+ bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);
+
+ if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
+ "section already deactivated (%#lx + %ld)\n",
+ pfn, nr_pages))
+ return -EINVAL;
+
+ bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
+ return 0;
+}
+
+static bool is_subsection_map_empty(struct mem_section *ms)
+{
+ return bitmap_empty(&ms->usage->subsection_map[0],
+ SUBSECTIONS_PER_SECTION);
+}
+
+static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
+{
+ struct mem_section *ms = __pfn_to_section(pfn);
+ DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
+ unsigned long *subsection_map;
+ int rc = 0;
+
+ subsection_mask_set(map, pfn, nr_pages);
+
+ subsection_map = &ms->usage->subsection_map[0];
+
+ if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
+ rc = -EINVAL;
+ else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
+ rc = -EEXIST;
+ else
+ bitmap_or(subsection_map, map, subsection_map,
+ SUBSECTIONS_PER_SECTION);
+
+ return rc;
+}
+
+/*
+ * To deactivate a memory region, there are 3 cases to handle across
+ * two configurations (SPARSEMEM_VMEMMAP={y,n}):
+ *
+ * 1. deactivation of a partial hot-added section (only possible in
+ * the SPARSEMEM_VMEMMAP=y case).
+ * a) section was present at memory init.
+ * b) section was hot-added post memory init.
+ * 2. deactivation of a complete hot-added section.
+ * 3. deactivation of a complete section from memory init.
+ *
+ * For 1, when subsection_map does not empty we will not be freeing the
+ * usage map, but still need to free the vmemmap range.
+ *
+ * For 2 and 3, the SPARSEMEM_VMEMMAP={y,n} cases are unified
+ */
+static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
+ struct vmem_altmap *altmap)
+{
+ struct mem_section *ms = __pfn_to_section(pfn);
+ bool section_is_early = early_section(ms);
+ struct page *memmap = NULL;
+ bool empty;
+
+ if (clear_subsection_map(pfn, nr_pages))
+ return;
+
+ empty = is_subsection_map_empty(ms);
+ if (empty) {
+ /*
+ * Mark the section invalid so that valid_section()
+ * return false. This prevents code from dereferencing
+ * ms->usage array.
+ */
+ ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
+
+ /*
+ * When removing an early section, the usage map is kept (as the
+ * usage maps of other sections fall into the same page). It
+ * will be re-used when re-adding the section - which is then no
+ * longer an early section. If the usage map is PageReserved, it
+ * was allocated during boot.
+ */
+ if (!PageReserved(virt_to_page(ms->usage))) {
+ kfree_rcu(ms->usage, rcu);
+ WRITE_ONCE(ms->usage, NULL);
+ }
+ memmap = pfn_to_page(SECTION_ALIGN_DOWN(pfn));
+ }
+
+ /*
+ * The memmap of early sections is always fully populated. See
+ * section_activate() and pfn_valid() .
+ */
+ if (!section_is_early) {
+ memmap_pages_add(-1L * (DIV_ROUND_UP(nr_pages * sizeof(struct page), PAGE_SIZE)));
+ depopulate_section_memmap(pfn, nr_pages, altmap);
+ } else if (memmap) {
+ memmap_boot_pages_add(-1L * (DIV_ROUND_UP(nr_pages * sizeof(struct page),
+ PAGE_SIZE)));
+ free_map_bootmem(memmap);
+ }
+
+ if (empty)
+ ms->section_mem_map = (unsigned long)NULL;
+}
+
+static struct page * __meminit section_activate(int nid, unsigned long pfn,
+ unsigned long nr_pages, struct vmem_altmap *altmap,
+ struct dev_pagemap *pgmap)
+{
+ struct mem_section *ms = __pfn_to_section(pfn);
+ struct mem_section_usage *usage = NULL;
+ struct page *memmap;
+ int rc;
+
+ if (!ms->usage) {
+ usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
+ if (!usage)
+ return ERR_PTR(-ENOMEM);
+ ms->usage = usage;
+ }
+
+ rc = fill_subsection_map(pfn, nr_pages);
+ if (rc) {
+ if (usage)
+ ms->usage = NULL;
+ kfree(usage);
+ return ERR_PTR(rc);
+ }
+
+ /*
+ * The early init code does not consider partially populated
+ * initial sections, it simply assumes that memory will never be
+ * referenced. If we hot-add memory into such a section then we
+ * do not need to populate the memmap and can simply reuse what
+ * is already there.
+ */
+ if (nr_pages < PAGES_PER_SECTION && early_section(ms))
+ return pfn_to_page(pfn);
+
+ memmap = populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
+ if (!memmap) {
+ section_deactivate(pfn, nr_pages, altmap);
+ return ERR_PTR(-ENOMEM);
+ }
+ memmap_pages_add(DIV_ROUND_UP(nr_pages * sizeof(struct page), PAGE_SIZE));
+
+ return memmap;
+}
+
+/**
+ * sparse_add_section - add a memory section, or populate an existing one
+ * @nid: The node to add section on
+ * @start_pfn: start pfn of the memory range
+ * @nr_pages: number of pfns to add in the section
+ * @altmap: alternate pfns to allocate the memmap backing store
+ * @pgmap: alternate compound page geometry for devmap mappings
+ *
+ * This is only intended for hotplug.
+ *
+ * Note that only VMEMMAP supports sub-section aligned hotplug,
+ * the proper alignment and size are gated by check_pfn_span().
+ *
+ *
+ * Return:
+ * * 0 - On success.
+ * * -EEXIST - Section has been present.
+ * * -ENOMEM - Out of memory.
+ */
+int __meminit sparse_add_section(int nid, unsigned long start_pfn,
+ unsigned long nr_pages, struct vmem_altmap *altmap,
+ struct dev_pagemap *pgmap)
+{
+ unsigned long section_nr = pfn_to_section_nr(start_pfn);
+ struct mem_section *ms;
+ struct page *memmap;
+ int ret;
+
+ ret = sparse_index_init(section_nr, nid);
+ if (ret < 0)
+ return ret;
+
+ memmap = section_activate(nid, start_pfn, nr_pages, altmap, pgmap);
+ if (IS_ERR(memmap))
+ return PTR_ERR(memmap);
+
+ /*
+ * Poison uninitialized struct pages in order to catch invalid flags
+ * combinations.
+ */
+ page_init_poison(memmap, sizeof(struct page) * nr_pages);
+
+ ms = __nr_to_section(section_nr);
+ __section_mark_present(ms, section_nr);
+
+ /* Align memmap to section boundary in the subsection case */
+ if (section_nr_to_pfn(section_nr) != start_pfn)
+ memmap = pfn_to_page(section_nr_to_pfn(section_nr));
+ sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);
+
+ return 0;
+}
+
+void sparse_remove_section(unsigned long pfn, unsigned long nr_pages,
+ struct vmem_altmap *altmap)
+{
+ struct mem_section *ms = __pfn_to_section(pfn);
+
+ if (WARN_ON_ONCE(!valid_section(ms)))
+ return;
+
+ section_deactivate(pfn, nr_pages, altmap);
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/mm/sparse.c b/mm/sparse.c
index bf620f3fe05d..007fd52c621e 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -79,7 +79,7 @@ static noinline struct mem_section __ref *sparse_index_alloc(int nid)
return section;
}
-static int __meminit sparse_index_init(unsigned long section_nr, int nid)
+int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
unsigned long root = SECTION_NR_TO_ROOT(section_nr);
struct mem_section *section;
@@ -103,7 +103,7 @@ static int __meminit sparse_index_init(unsigned long section_nr, int nid)
return 0;
}
#else /* !SPARSEMEM_EXTREME */
-static inline int sparse_index_init(unsigned long section_nr, int nid)
+int sparse_index_init(unsigned long section_nr, int nid)
{
return 0;
}
@@ -167,40 +167,6 @@ static inline unsigned long first_present_section_nr(void)
return next_present_section_nr(-1);
}
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
-static void subsection_mask_set(unsigned long *map, unsigned long pfn,
- unsigned long nr_pages)
-{
- int idx = subsection_map_index(pfn);
- int end = subsection_map_index(pfn + nr_pages - 1);
-
- bitmap_set(map, idx, end - idx + 1);
-}
-
-void __init sparse_init_subsection_map(unsigned long pfn, unsigned long nr_pages)
-{
- int end_sec_nr = pfn_to_section_nr(pfn + nr_pages - 1);
- unsigned long nr, start_sec_nr = pfn_to_section_nr(pfn);
-
- for (nr = start_sec_nr; nr <= end_sec_nr; nr++) {
- struct mem_section *ms;
- unsigned long pfns;
-
- pfns = min(nr_pages, PAGES_PER_SECTION
- - (pfn & ~PAGE_SECTION_MASK));
- ms = __nr_to_section(nr);
- subsection_mask_set(ms->usage->subsection_map, pfn, pfns);
-
- pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,
- pfns, subsection_map_index(pfn),
- subsection_map_index(pfn + pfns - 1));
-
- pfn += pfns;
- nr_pages -= pfns;
- }
-}
-#endif
-
/* Record a memory area against a node. */
static void __init memory_present(int nid, unsigned long start, unsigned long end)
{
@@ -482,279 +448,3 @@ void __init sparse_init(void)
sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
vmemmap_populate_print_last();
}
-
-#ifdef CONFIG_MEMORY_HOTPLUG
-
-/* Mark all memory sections within the pfn range as online */
-void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
-{
- unsigned long pfn;
-
- for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
- unsigned long section_nr = pfn_to_section_nr(pfn);
- struct mem_section *ms = __nr_to_section(section_nr);
-
- ms->section_mem_map |= SECTION_IS_ONLINE;
- }
-}
-
-/* Mark all memory sections within the pfn range as offline */
-void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
-{
- unsigned long pfn;
-
- for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
- unsigned long section_nr = pfn_to_section_nr(pfn);
- struct mem_section *ms = __nr_to_section(section_nr);
-
- ms->section_mem_map &= ~SECTION_IS_ONLINE;
- }
-}
-
-static struct page * __meminit populate_section_memmap(unsigned long pfn,
- unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
- struct dev_pagemap *pgmap)
-{
- return __populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
-}
-
-static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
- struct vmem_altmap *altmap)
-{
- unsigned long start = (unsigned long) pfn_to_page(pfn);
- unsigned long end = start + nr_pages * sizeof(struct page);
-
- vmemmap_free(start, end, altmap);
-}
-static void free_map_bootmem(struct page *memmap)
-{
- unsigned long start = (unsigned long)memmap;
- unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
-
- vmemmap_free(start, end, NULL);
-}
-
-static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
-{
- DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
- DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
- struct mem_section *ms = __pfn_to_section(pfn);
- unsigned long *subsection_map = ms->usage
- ? &ms->usage->subsection_map[0] : NULL;
-
- subsection_mask_set(map, pfn, nr_pages);
- if (subsection_map)
- bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);
-
- if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
- "section already deactivated (%#lx + %ld)\n",
- pfn, nr_pages))
- return -EINVAL;
-
- bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
- return 0;
-}
-
-static bool is_subsection_map_empty(struct mem_section *ms)
-{
- return bitmap_empty(&ms->usage->subsection_map[0],
- SUBSECTIONS_PER_SECTION);
-}
-
-static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
-{
- struct mem_section *ms = __pfn_to_section(pfn);
- DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
- unsigned long *subsection_map;
- int rc = 0;
-
- subsection_mask_set(map, pfn, nr_pages);
-
- subsection_map = &ms->usage->subsection_map[0];
-
- if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
- rc = -EINVAL;
- else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
- rc = -EEXIST;
- else
- bitmap_or(subsection_map, map, subsection_map,
- SUBSECTIONS_PER_SECTION);
-
- return rc;
-}
-
-/*
- * To deactivate a memory region, there are 3 cases to handle across
- * two configurations (SPARSEMEM_VMEMMAP={y,n}):
- *
- * 1. deactivation of a partial hot-added section (only possible in
- * the SPARSEMEM_VMEMMAP=y case).
- * a) section was present at memory init.
- * b) section was hot-added post memory init.
- * 2. deactivation of a complete hot-added section.
- * 3. deactivation of a complete section from memory init.
- *
- * For 1, when subsection_map does not empty we will not be freeing the
- * usage map, but still need to free the vmemmap range.
- *
- * For 2 and 3, the SPARSEMEM_VMEMMAP={y,n} cases are unified
- */
-static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
- struct vmem_altmap *altmap)
-{
- struct mem_section *ms = __pfn_to_section(pfn);
- bool section_is_early = early_section(ms);
- struct page *memmap = NULL;
- bool empty;
-
- if (clear_subsection_map(pfn, nr_pages))
- return;
-
- empty = is_subsection_map_empty(ms);
- if (empty) {
- /*
- * Mark the section invalid so that valid_section()
- * return false. This prevents code from dereferencing
- * ms->usage array.
- */
- ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
-
- /*
- * When removing an early section, the usage map is kept (as the
- * usage maps of other sections fall into the same page). It
- * will be re-used when re-adding the section - which is then no
- * longer an early section. If the usage map is PageReserved, it
- * was allocated during boot.
- */
- if (!PageReserved(virt_to_page(ms->usage))) {
- kfree_rcu(ms->usage, rcu);
- WRITE_ONCE(ms->usage, NULL);
- }
- memmap = pfn_to_page(SECTION_ALIGN_DOWN(pfn));
- }
-
- /*
- * The memmap of early sections is always fully populated. See
- * section_activate() and pfn_valid() .
- */
- if (!section_is_early) {
- memmap_pages_add(-1L * (DIV_ROUND_UP(nr_pages * sizeof(struct page), PAGE_SIZE)));
- depopulate_section_memmap(pfn, nr_pages, altmap);
- } else if (memmap) {
- memmap_boot_pages_add(-1L * (DIV_ROUND_UP(nr_pages * sizeof(struct page),
- PAGE_SIZE)));
- free_map_bootmem(memmap);
- }
-
- if (empty)
- ms->section_mem_map = (unsigned long)NULL;
-}
-
-static struct page * __meminit section_activate(int nid, unsigned long pfn,
- unsigned long nr_pages, struct vmem_altmap *altmap,
- struct dev_pagemap *pgmap)
-{
- struct mem_section *ms = __pfn_to_section(pfn);
- struct mem_section_usage *usage = NULL;
- struct page *memmap;
- int rc;
-
- if (!ms->usage) {
- usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
- if (!usage)
- return ERR_PTR(-ENOMEM);
- ms->usage = usage;
- }
-
- rc = fill_subsection_map(pfn, nr_pages);
- if (rc) {
- if (usage)
- ms->usage = NULL;
- kfree(usage);
- return ERR_PTR(rc);
- }
-
- /*
- * The early init code does not consider partially populated
- * initial sections, it simply assumes that memory will never be
- * referenced. If we hot-add memory into such a section then we
- * do not need to populate the memmap and can simply reuse what
- * is already there.
- */
- if (nr_pages < PAGES_PER_SECTION && early_section(ms))
- return pfn_to_page(pfn);
-
- memmap = populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
- if (!memmap) {
- section_deactivate(pfn, nr_pages, altmap);
- return ERR_PTR(-ENOMEM);
- }
- memmap_pages_add(DIV_ROUND_UP(nr_pages * sizeof(struct page), PAGE_SIZE));
-
- return memmap;
-}
-
-/**
- * sparse_add_section - add a memory section, or populate an existing one
- * @nid: The node to add section on
- * @start_pfn: start pfn of the memory range
- * @nr_pages: number of pfns to add in the section
- * @altmap: alternate pfns to allocate the memmap backing store
- * @pgmap: alternate compound page geometry for devmap mappings
- *
- * This is only intended for hotplug.
- *
- * Note that only VMEMMAP supports sub-section aligned hotplug,
- * the proper alignment and size are gated by check_pfn_span().
- *
- *
- * Return:
- * * 0 - On success.
- * * -EEXIST - Section has been present.
- * * -ENOMEM - Out of memory.
- */
-int __meminit sparse_add_section(int nid, unsigned long start_pfn,
- unsigned long nr_pages, struct vmem_altmap *altmap,
- struct dev_pagemap *pgmap)
-{
- unsigned long section_nr = pfn_to_section_nr(start_pfn);
- struct mem_section *ms;
- struct page *memmap;
- int ret;
-
- ret = sparse_index_init(section_nr, nid);
- if (ret < 0)
- return ret;
-
- memmap = section_activate(nid, start_pfn, nr_pages, altmap, pgmap);
- if (IS_ERR(memmap))
- return PTR_ERR(memmap);
-
- /*
- * Poison uninitialized struct pages in order to catch invalid flags
- * combinations.
- */
- page_init_poison(memmap, sizeof(struct page) * nr_pages);
-
- ms = __nr_to_section(section_nr);
- __section_mark_present(ms, section_nr);
-
- /* Align memmap to section boundary in the subsection case */
- if (section_nr_to_pfn(section_nr) != start_pfn)
- memmap = pfn_to_page(section_nr_to_pfn(section_nr));
- sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);
-
- return 0;
-}
-
-void sparse_remove_section(unsigned long pfn, unsigned long nr_pages,
- struct vmem_altmap *altmap)
-{
- struct mem_section *ms = __pfn_to_section(pfn);
-
- if (WARN_ON_ONCE(!valid_section(ms)))
- return;
-
- section_deactivate(pfn, nr_pages, altmap);
-}
-#endif /* CONFIG_MEMORY_HOTPLUG */
--
2.43.0