From: Rik van Riel <riel@surriel.com>
To: linux-kernel@vger.kernel.org
Cc: kernel-team@meta.com, linux-mm@kvack.org, david@kernel.org,
	willy@infradead.org, surenb@google.com, hannes@cmpxchg.org,
	ljs@kernel.org, ziy@nvidia.com, usama.arif@linux.dev,
	Rik van Riel <riel@meta.com>, Rik van Riel <riel@surriel.com>
Subject: [RFC PATCH 18/45] mm: page_alloc: superpageblock-aware contiguous and higher-order allocation
Date: Thu, 30 Apr 2026 16:20:47 -0400
Message-ID: <20260430202233.111010-19-riel@surriel.com>
In-Reply-To: <20260430202233.111010-1-riel@surriel.com>

From: Rik van Riel <riel@meta.com>

Add superpageblock-aware contiguous page allocation that uses SPB
metadata to find ranges of clean (all-free) superpageblocks instead of
scanning all memory with alloc_contig_range(). The SPB metadata
identifies exactly which 1GB regions contain only free pages, making
CMA and other large contiguous allocations more targeted.
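
As a rough illustration of why the metadata lookup is cheap, here is a
minimal userspace model (not the kernel code; the struct, the capacity
field, and the 262144-pages-per-SPB figure for 4K pages are assumptions
of the sketch, though nr_free_pages mirrors struct superpageblock):

  #include <stdio.h>

  struct spb_model {
      unsigned long start_pfn;
      unsigned long nr_free_pages;  /* mirrors struct superpageblock */
      unsigned long capacity;       /* pages spanned, zone-clipped */
  };

  /* "Clean (all-free)" here: every page in the 1GB region is free. */
  static long find_clean_spb(const struct spb_model *spbs, int n)
  {
      for (int i = 0; i < n; i++)
          if (spbs[i].nr_free_pages == spbs[i].capacity)
              return (long)spbs[i].start_pfn;
      return -1;
  }

  int main(void)
  {
      struct spb_model spbs[] = {
          { 0,      200000, 262144 },   /* partially used */
          { 262144, 262144, 262144 },   /* clean: all pages free */
      };

      printf("clean SPB at pfn %ld\n", find_clean_spb(spbs, 2));
      return 0;
  }

A suitable 1GB region is found by scanning O(#superpageblocks) metadata
entries instead of every PFN in the zone; the kernel side is cheaper
still, since empty and clean SPBs sit on dedicated lists (spb_empty,
spb_lists[SB_CLEAN]).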

Track contiguous allocations in the superpageblock metadata: mark
fully-covered SPBs with contig_allocated and move them to the
spb_isolated list so they no longer participate in allocation steering.
The marking loop starts at ALIGN(start, spb_pages), so allocation
boundaries that are not SPB-aligned are handled correctly and partially
covered SPBs at either end are left unmarked.
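
For example (standalone sketch; ALIGN is the usual power-of-two
round-up, and 262144 pages per SPB assumes a 1GB SPB with 4K pages),
marking the range [100000, 700000) touches only the single SPB that
lies fully inside it:

  #include <stdio.h>

  #define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))

  int main(void)
  {
      unsigned long spb_pages = 262144;  /* 1GB / 4KB, assumed */
      unsigned long start = 100000, end = 700000;
      unsigned long pfn;

      /* Same loop shape as superpageblock_contig_mark() below. */
      for (pfn = ALIGN(start, spb_pages); pfn + spb_pages <= end;
           pfn += spb_pages)
          printf("fully covered SPB at pfn %lu\n", pfn);
      return 0;
  }

This prints only pfn 262144: the partially covered SPBs at
[100000, 262144) and [524288, 700000) are skipped.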

Hook superpageblock-aware allocation into __alloc_pages_direct_compact()
for THP/mTHP and high-order unmovable/reclaimable allocations. For
movable allocations at pageblock_order or above, try
spb_try_alloc_contig() first. For unmovable/reclaimable allocations,
evacuate movable pages from tainted superpageblocks to create buddy
coalescing opportunities. Both paths fall through to traditional
compaction if the SPB approach fails.
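
A condensed decision table (toy standalone model; the enum and the
outcome strings are illustrative, not kernel API):

  #include <stdio.h>

  enum mt { MT_MOVABLE, MT_UNMOVABLE, MT_RECLAIMABLE, MT_OTHER };

  static const char *spb_path(enum mt type, unsigned int order,
                              unsigned int pb_order)
  {
      if (type == MT_MOVABLE && order >= pb_order)
          return "spb_try_alloc_contig(), then compaction";
      if (type == MT_UNMOVABLE || type == MT_RECLAIMABLE)
          return "evacuate movable pages, retry freelist, then compaction";
      return "traditional compaction only";
  }

  int main(void)
  {
      /* order 9 == pageblock_order with 4K pages on x86-64 (assumed) */
      printf("%s\n", spb_path(MT_MOVABLE, 9, 9));
      printf("%s\n", spb_path(MT_UNMOVABLE, 3, 9));
      return 0;
  }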

Signed-off-by: Rik van Riel <riel@surriel.com>
Assisted-by: Claude:claude-opus-4.7 syzkaller
---
 include/linux/mmzone.h |   2 +
 mm/mm_init.c           |   1 +
 mm/page_alloc.c        | 452 ++++++++++++++++++++++++++++++++++++++++-
 3 files changed, 450 insertions(+), 5 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ba6f08295ff9..765e1c5dc365 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -923,6 +923,7 @@ struct superpageblock {
 	u16			nr_movable;
 	u16			nr_reserved;	/* holes, firmware, etc. */
 	u16			total_pageblocks; /* zone-clipped total */
+	bool			contig_allocated; /* all pages held by contig alloc */
 
 	/* Total free pages across all per-superpageblock free lists */
 	unsigned long		nr_free_pages;
@@ -1010,6 +1011,7 @@ struct zone {
 
 	/* Superpageblock fullness lists for allocation steering */
 	struct list_head	spb_empty;	/* completely free superpageblocks */
+	struct list_head	spb_isolated;	/* fully isolated (1GB contig alloc) */
 	struct list_head	spb_lists[__NR_SB_CATEGORIES][__NR_SB_FULLNESS];
 
 	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 1f55ff3126a2..8e3c64d37254 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1601,6 +1601,7 @@ static void __init setup_superpageblocks(struct zone *zone)
 
 	/* Fullness lists steer allocations to preferred superpageblocks */
 	INIT_LIST_HEAD(&zone->spb_empty);
+	INIT_LIST_HEAD(&zone->spb_isolated);
 	for (cat = 0; cat < __NR_SB_CATEGORIES; cat++)
 		for (full = 0; full < __NR_SB_FULLNESS; full++)
 			INIT_LIST_HEAD(&zone->spb_lists[cat][full]);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 54b9a69bda10..8ce96db50c2f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -754,8 +754,26 @@ static inline enum sb_fullness sb_get_fullness(struct superpageblock *sb,
  */
 #ifdef CONFIG_COMPACTION
 static void spb_maybe_start_defrag(struct superpageblock *sb);
+static bool spb_needs_defrag(struct superpageblock *sb);
+static struct page *spb_try_alloc_contig(struct zone *zone,
+					unsigned long nr_pages,
+					gfp_t gfp_mask);
+static bool spb_evacuate_for_order(struct zone *zone, unsigned int order,
+				  int migratetype);
 #else
 static inline void spb_maybe_start_defrag(struct superpageblock *sb) {}
+static inline bool spb_needs_defrag(struct superpageblock *sb) { return false; }
+static inline struct page *spb_try_alloc_contig(struct zone *zone,
+					       unsigned long nr_pages,
+					       gfp_t gfp_mask)
+{
+	return NULL;
+}
+static inline bool spb_evacuate_for_order(struct zone *zone, unsigned int order,
+					 int migratetype)
+{
+	return false;
+}
 #endif
 
 static void spb_update_list(struct superpageblock *sb)
@@ -766,6 +784,11 @@ static void spb_update_list(struct superpageblock *sb)
 
 	list_del_init(&sb->list);
 
+	if (sb->contig_allocated) {
+		list_add_tail(&sb->list, &zone->spb_isolated);
+		return;
+	}
+
 	if (sb->nr_free == sb->total_pageblocks) {
 		list_add_tail(&sb->list, &zone->spb_empty);
 		return;
@@ -916,6 +939,45 @@ void __meminit init_pageblock_migratetype(struct page *page,
 	}
 }
 
+#ifdef CONFIG_CONTIG_ALLOC
+/**
+ * superpageblock_contig_mark - Mark/unmark SPBs for contiguous allocation
+ * @start: start PFN of the contiguous range
+ * @end: end PFN (exclusive) of the contiguous range
+ * @allocated: true when allocated, false when freed
+ *
+ * Called after a successful contiguous allocation (or before freeing) to
+ * mark fully-covered superpageblocks as contig_allocated. This moves them
+ * to the spb_isolated list so they don't participate in allocation steering,
+ * and makes them visible in debugfs.
+ */
+static void superpageblock_contig_mark(unsigned long start, unsigned long end,
+				       bool allocated)
+{
+	struct zone *zone = page_zone(pfn_to_page(start));
+	unsigned long spb_pages = SUPERPAGEBLOCK_NR_PAGES;
+	unsigned long pfn;
+	unsigned long flags;
+
+	/* Only track full-SPB contiguous allocations */
+	if (end - start < spb_pages)
+		return;
+
+	spin_lock_irqsave(&zone->lock, flags);
+	for (pfn = ALIGN(start, spb_pages); pfn + spb_pages <= end;
+	     pfn += spb_pages) {
+		struct superpageblock *sb = pfn_to_superpageblock(zone, pfn);
+
+		if (!sb)
+			continue;
+
+		sb->contig_allocated = allocated;
+		spb_update_list(sb);
+	}
+	spin_unlock_irqrestore(&zone->lock, flags);
+}
+#endif /* CONFIG_CONTIG_ALLOC */
+
 #ifdef CONFIG_DEBUG_VM
 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 {
@@ -4240,6 +4302,17 @@ static void __free_frozen_pages(struct page *page, unsigned int order,
 
 void free_frozen_pages(struct page *page, unsigned int order)
 {
+#ifdef CONFIG_CONTIG_ALLOC
+	/*
+	 * If freeing a superpageblock-sized (or larger) range, clear the
+	 * contig_allocated flag so the SPB returns to normal allocation.
+	 */
+	if (order >= SUPERPAGEBLOCK_ORDER) {
+		unsigned long pfn = page_to_pfn(page);
+
+		superpageblock_contig_mark(pfn, pfn + (1UL << order), false);
+	}
+#endif
 	__free_frozen_pages(page, order, FPI_NONE);
 }
 
@@ -5408,6 +5481,60 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	if (!order)
 		return NULL;
 
+	/*
+	 * Superpageblock-aware contiguous allocation for movable high-order
+	 * allocations. Use superpageblock metadata to find clean ranges and
+	 * evacuate them via alloc_contig_frozen_range, bypassing the
+	 * blind compaction scanner entirely.
+	 */
+	if (order >= pageblock_order &&
+	    ac->migratetype == MIGRATE_MOVABLE) {
+		struct zoneref *z;
+		struct zone *zone;
+
+		for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
+					       ac->highest_zoneidx,
+					       ac->nodemask) {
+			page = spb_try_alloc_contig(zone, 1UL << order,
+						   gfp_mask);
+			if (page) {
+				prep_new_page(page, order, gfp_mask,
+					      alloc_flags);
+				*compact_result = COMPACT_SUCCESS;
+				count_vm_event(COMPACTSUCCESS);
+				return page;
+			}
+		}
+	}
+
+	/*
+	 * Superpageblock-aware targeted evacuation for unmovable/reclaimable
+	 * high-order allocations. Instead of blind compaction, find
+	 * pageblocks of the right migratetype in tainted superpageblocks
+	 * and evacuate their movable pages to create buddy coalescing
+	 * opportunities.
+	 */
+	if (ac->migratetype == MIGRATE_UNMOVABLE ||
+	    ac->migratetype == MIGRATE_RECLAIMABLE) {
+		struct zoneref *z;
+		struct zone *zone;
+
+		for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
+					       ac->highest_zoneidx,
+					       ac->nodemask) {
+			if (spb_evacuate_for_order(zone, order,
+						  ac->migratetype)) {
+				page = get_page_from_freelist(gfp_mask, order,
+							     alloc_flags, ac);
+				if (page) {
+					*compact_result = COMPACT_SUCCESS;
+					count_vm_event(COMPACTSUCCESS);
+					return page;
+				}
+			}
+		}
+	}
+
 	psi_memstall_enter(&pflags);
 	delayacct_compact_start();
 	noreclaim_flag = memalloc_noreclaim_save();
@@ -9011,6 +9138,8 @@ int alloc_contig_frozen_range_noprof(unsigned long start, unsigned long end,
 	}
 done:
 	undo_isolate_page_range(start, end);
+	if (!ret)
+		superpageblock_contig_mark(start, end, true);
 	return ret;
 }
 EXPORT_SYMBOL(alloc_contig_frozen_range_noprof);
@@ -9105,6 +9234,279 @@ static bool zone_spans_last_pfn(const struct zone *zone,
 	return zone_spans_pfn(zone, last_pfn);
 }
 
+/*
+ * Maximum superpageblock candidates to collect for contiguous allocation.
+ * Collected under zone->lock, then tried without it.
+ */
+#define SPB_CONTIG_MAX_CANDIDATES 4
+
+#ifdef CONFIG_COMPACTION
+/**
+ * sb_collect_contig_candidates - Find superpageblock ranges for contiguous alloc
+ * @zone: zone to search (must hold zone->lock)
+ * @nr_pages: number of contiguous pages needed
+ * @pfns: output array of candidate start PFNs
+ * @max: maximum candidates to collect
+ *
+ * For superpageblock-sized (1GB) allocations:
+ *   1. Empty superpageblocks first — no evacuation needed
+ *   2. Clean superpageblocks from almost-empty to full — less evacuation work
+ *
+ * For pageblock-sized (2MB+) sub-superpageblock allocations:
+ *   1. Clean superpageblocks from fullest to almost-empty — pack allocations
+ *      to preserve empty superpageblocks for 1GB
+ *   2. Empty superpageblocks as last resort
+ *
+ * Returns number of candidates found.
+ */
+static int sb_collect_contig_candidates(struct zone *zone,
+					unsigned long nr_pages,
+					unsigned long *pfns, int max)
+{
+	struct superpageblock *sb;
+	int full, n = 0;
+
+	lockdep_assert_held(&zone->lock);
+
+	if (nr_pages >= SUPERPAGEBLOCK_NR_PAGES) {
+		/* 1GB+: empty superpageblocks first (no evacuation needed) */
+		list_for_each_entry(sb, &zone->spb_empty, list) {
+			if (sb->total_pageblocks < SUPERPAGEBLOCK_NR_PAGEBLOCKS)
+				continue;
+			pfns[n++] = sb->start_pfn;
+			if (n >= max)
+				return n;
+		}
+		/* Then clean superpageblocks, almost-empty first (less work) */
+		for (full = __NR_SB_FULLNESS - 1; full >= 0; full--) {
+			list_for_each_entry(sb,
+					    &zone->spb_lists[SB_CLEAN][full],
+					    list) {
+				if (sb->total_pageblocks <
+				    SUPERPAGEBLOCK_NR_PAGEBLOCKS)
+					continue;
+				pfns[n++] = sb->start_pfn;
+				if (n >= max)
+					return n;
+			}
+		}
+		return n;
+	}
+
+	/*
+	 * 2MB+ sub-superpageblock allocations.
+	 * Walk clean superpageblocks fullest-first — pack allocations into
+	 * partial superpageblocks to preserve empty ones for 1GB use.
+	 * Pick one candidate per superpageblock for diversity.
+	 */
+	for (full = SB_FULL_75; full < __NR_SB_FULLNESS; full++) {
+		list_for_each_entry(sb, &zone->spb_lists[SB_CLEAN][full], list) {
+			unsigned long pfn, sb_end;
+
+			sb_end = sb->start_pfn +
+				(unsigned long)sb->total_pageblocks *
+				pageblock_nr_pages;
+			pfn = ALIGN(sb->start_pfn, nr_pages);
+
+			if (pfn + nr_pages <= sb_end) {
+				pfns[n++] = pfn;
+				if (n >= max)
+					return n;
+			}
+		}
+	}
+	/* Empty superpageblocks as last resort for 2MB */
+	list_for_each_entry(sb, &zone->spb_empty, list) {
+		unsigned long pfn = ALIGN(sb->start_pfn, nr_pages);
+		unsigned long sb_end = sb->start_pfn +
+			(unsigned long)sb->total_pageblocks *
+			pageblock_nr_pages;
+
+		if (pfn + nr_pages <= sb_end) {
+			pfns[n++] = pfn;
+			if (n >= max)
+				return n;
+		}
+	}
+	return n;
+}
+
+/**
+ * spb_try_alloc_contig - Superpageblock-aware contiguous page allocation
+ * @zone: zone to allocate from
+ * @nr_pages: number of contiguous pages needed (>= pageblock_nr_pages)
+ * @gfp_mask: GFP mask for allocation
+ *
+ * Use superpageblock metadata to quickly find suitable ranges for contiguous
+ * allocation, avoiding the brute-force PFN scan. Each candidate is tried
+ * twice to handle transient failures (e.g., temporary page pins, racing
+ * allocations), then falls through to the next candidate.
+ *
+ * Returns: page pointer on success, NULL on failure.
+ */
+static struct page *spb_try_alloc_contig(struct zone *zone,
+					unsigned long nr_pages,
+					gfp_t gfp_mask)
+{
+	unsigned long pfns[SPB_CONTIG_MAX_CANDIDATES];
+	unsigned long flags;
+	int nr_candidates, i;
+
+	if (nr_pages < pageblock_nr_pages)
+		return NULL;
+
+	spin_lock_irqsave(&zone->lock, flags);
+	nr_candidates = sb_collect_contig_candidates(zone, nr_pages,
+						     pfns,
+						     SPB_CONTIG_MAX_CANDIDATES);
+	spin_unlock_irqrestore(&zone->lock, flags);
+
+	for (i = 0; i < nr_candidates; i++) {
+		int attempts;
+
+		for (attempts = 0; attempts < 2; attempts++) {
+			int ret;
+
+			ret = alloc_contig_frozen_range_noprof(pfns[i],
+					pfns[i] + nr_pages,
+					ACR_FLAGS_NONE, gfp_mask);
+			if (!ret)
+				return pfn_to_page(pfns[i]);
+		}
+
+		/*
+		 * Failed on this candidate — rotate its superpageblock to the
+		 * tail of its list so the next call tries fresh candidates.
+		 */
+		spin_lock_irqsave(&zone->lock, flags);
+		{
+			struct superpageblock *sb =
+				pfn_to_superpageblock(zone, pfns[i]);
+			if (sb) {
+				struct list_head *head;
+
+				if (sb->nr_free == sb->total_pageblocks)
+					head = &zone->spb_empty;
+				else
+					head = &zone->spb_lists
+						[spb_get_category(sb)]
+						[sb_get_fullness(sb, spb_get_category(sb))];
+				list_move_tail(&sb->list, head);
+			}
+		}
+		spin_unlock_irqrestore(&zone->lock, flags);
+	}
+	return NULL;
+}
+
+/**
+ * sb_collect_evacuate_candidates - Find pageblocks for targeted evacuation
+ * @zone: zone to search (must hold zone->lock)
+ * @migratetype: desired migratetype (MIGRATE_UNMOVABLE or MIGRATE_RECLAIMABLE)
+ * @sb_pfns: output array of tainted superpageblock start PFNs
+ * @max: maximum candidates to collect
+ *
+ * Find tainted superpageblocks containing pageblocks of the desired migratetype
+ * that also have movable pages to evacuate. Evacuating movable pages from
+ * these pageblocks creates buddy coalescing opportunities for high-order
+ * allocations of the desired migratetype.
+ *
+ * Returns number of candidate superpageblock PFNs found.
+ */
+static int sb_collect_evacuate_candidates(struct zone *zone, int migratetype,
+					  unsigned long *sb_pfns, int max)
+{
+	struct superpageblock *sb;
+	int full, n = 0;
+
+	lockdep_assert_held(&zone->lock);
+
+	for (full = 0; full < __NR_SB_FULLNESS; full++) {
+		list_for_each_entry(sb, &zone->spb_lists[SB_TAINTED][full],
+				    list) {
+			bool has_matching;
+
+			if (!sb->nr_movable)
+				continue;
+
+			if (migratetype == MIGRATE_UNMOVABLE)
+				has_matching = sb->nr_unmovable > 0;
+			else if (migratetype == MIGRATE_RECLAIMABLE)
+				has_matching = sb->nr_reclaimable > 0;
+			else
+				continue;
+
+			if (!has_matching)
+				continue;
+
+			sb_pfns[n++] = sb->start_pfn;
+			if (n >= max)
+				return n;
+		}
+	}
+	return n;
+}
+
+/**
+ * spb_evacuate_for_order - Targeted evacuation of movable pages from
+ *                         unmovable/reclaimable pageblocks
+ * @zone: zone to work on
+ * @order: allocation order that failed
+ * @migratetype: desired migratetype (MIGRATE_UNMOVABLE or MIGRATE_RECLAIMABLE)
+ *
+ * Instead of blind compaction, use superpageblock metadata to find pageblocks
+ * of the right migratetype in tainted superpageblocks and evacuate their
+ * movable pages. This creates buddy coalescing opportunities within
+ * the pageblock, enabling higher-order allocations.
+ *
+ * Returns true if evacuation was performed (caller should retry allocation).
+ */
+static bool spb_evacuate_for_order(struct zone *zone, unsigned int order,
+				  int migratetype)
+{
+	unsigned long sb_pfns[SPB_CONTIG_MAX_CANDIDATES];
+	unsigned long flags;
+	int nr_sbs, i;
+	bool did_evacuate = false;
+
+	spin_lock_irqsave(&zone->lock, flags);
+	nr_sbs = sb_collect_evacuate_candidates(zone, migratetype,
+						sb_pfns,
+						SPB_CONTIG_MAX_CANDIDATES);
+	spin_unlock_irqrestore(&zone->lock, flags);
+
+	for (i = 0; i < nr_sbs && !did_evacuate; i++) {
+		unsigned long pfn, end_pfn;
+
+		end_pfn = sb_pfns[i] + SUPERPAGEBLOCK_NR_PAGES;
+		for (pfn = sb_pfns[i]; pfn < end_pfn;
+		     pfn += pageblock_nr_pages) {
+			struct page *page;
+
+			if (!pfn_valid(pfn))
+				continue;
+
+			/* Superpageblocks can straddle zone boundaries. */
+			if (!zone_spans_pfn(zone, pfn))
+				continue;
+
+			page = pfn_to_page(pfn);
+
+			if (get_pfnblock_migratetype(page, pfn) != migratetype)
+				continue;
+
+			if (!get_pfnblock_bit(page, pfn, PB_has_movable))
+				continue;
+
+			evacuate_pageblock(zone, pfn, true);
+			did_evacuate = true;
+			break;
+		}
+	}
+	return did_evacuate;
+}
+#endif /* CONFIG_COMPACTION */
+
 /**
  * alloc_contig_frozen_pages() -- tries to find and allocate contiguous range of frozen pages
  * @nr_pages:	Number of contiguous pages to allocate
@@ -9138,9 +9540,29 @@ struct page *alloc_contig_frozen_pages_noprof(unsigned long nr_pages,
 	struct zonelist *zonelist;
 	struct zone *zone;
 	struct zoneref *z;
+	struct page *page;
 	bool skip_hugetlb = true;
 	bool skipped_hugetlb = false;
 
+	/*
+	 * First pass: superpageblock-aware search. Use superpageblock metadata
+	 * to quickly find suitable ranges, avoiding the brute-force PFN
+	 * scan. For 1GB allocations this walks spb_empty then
+	 * spb_lists[SB_CLEAN]; for 2MB+ it finds evacuatable pageblocks
+	 * in clean superpageblocks.
+	 */
+	if (nr_pages >= pageblock_nr_pages) {
+		zonelist = node_zonelist(nid, gfp_mask);
+		for_each_zone_zonelist_nodemask(zone, z, zonelist,
+					       gfp_zone(gfp_mask), nodemask) {
+			page = spb_try_alloc_contig(zone, nr_pages, gfp_mask);
+			if (page)
+				return page;
+		}
+	}
+
+	/* Second pass: brute-force PFN scan (existing fallback) */
+
 retry:
 	zonelist = node_zonelist(nid, gfp_mask);
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
@@ -9235,6 +9657,8 @@ void free_contig_frozen_range(unsigned long pfn, unsigned long nr_pages)
 	if (WARN_ON_ONCE(first_page != compound_head(first_page)))
 		return;
 
+	superpageblock_contig_mark(pfn, pfn + nr_pages, false);
+
 	if (PageHead(first_page)) {
 		WARN_ON_ONCE(order != compound_order(first_page));
 		free_frozen_pages(first_page, order);
@@ -9254,9 +9678,13 @@ EXPORT_SYMBOL(free_contig_frozen_range);
  */
 void free_contig_range(unsigned long pfn, unsigned long nr_pages)
 {
+	unsigned long end = pfn + nr_pages;
+
 	if (WARN_ON_ONCE(PageHead(pfn_to_page(pfn))))
 		return;
 
+	superpageblock_contig_mark(pfn, end, false);
+
 	for (; nr_pages--; pfn++)
 		__free_page(pfn_to_page(pfn));
 }
@@ -9794,6 +10222,15 @@ static int superpageblock_debugfs_show(struct seq_file *m, void *v)
 		if (empty_count)
 			seq_printf(m, "  empty: %d\n", empty_count);
 
+		{
+			int isolated_count = 0;
+
+			list_for_each_entry(sb, &zone->spb_isolated, list)
+				isolated_count++;
+			if (isolated_count)
+				seq_printf(m, "  contig_alloc: %d\n", isolated_count);
+		}
+
 		for (cat = 0; cat < __NR_SB_CATEGORIES; cat++) {
 			for (full = 0; full < __NR_SB_FULLNESS; full++) {
 				int count = 0;
@@ -9812,11 +10249,16 @@ static int superpageblock_debugfs_show(struct seq_file *m, void *v)
 		/* Per-superpageblock detail */
 		for (i = 0; i < zone->nr_superpageblocks; i++) {
 			sb = &zone->superpageblocks[i];
-			seq_printf(m, "  sb[%lu] pfn=0x%lx: unmov=%u recl=%u mov=%u rsv=%u free=%u total=%u\n",
-				   i, sb->start_pfn,
-				   sb->nr_unmovable, sb->nr_reclaimable,
-				   sb->nr_movable, sb->nr_reserved,
-				   sb->nr_free, sb->total_pageblocks);
+			if (sb->contig_allocated)
+				seq_printf(m, "  sb[%lu] pfn=0x%lx: contig_allocated total=%u\n",
+					   i, sb->start_pfn,
+					   sb->total_pageblocks);
+			else
+				seq_printf(m, "  sb[%lu] pfn=0x%lx: unmov=%u recl=%u mov=%u rsv=%u free=%u total=%u\n",
+					   i, sb->start_pfn,
+					   sb->nr_unmovable, sb->nr_reclaimable,
+					   sb->nr_movable, sb->nr_reserved,
+					   sb->nr_free, sb->total_pageblocks);
 		}
 	}
 	return 0;
-- 
2.52.0


