* [PATCH] mm: Replace (20 - PAGE_SHIFT) with common macros for pages<->MB conversion
@ 2025-07-18  2:41 Ye Liu
  2025-07-18  3:21 ` Andrew Morton
                   ` (7 more replies)
  0 siblings, 8 replies; 15+ messages in thread
From: Ye Liu @ 2025-07-18  2:41 UTC (permalink / raw)
  To: Andrew Morton, David Hildenbrand, Davidlohr Bueso,
	Paul E. McKenney, Josh Triplett, Frederic Weisbecker,
	Neeraj Upadhyay, Joel Fernandes, Boqun Feng, Uladzislau Rezki,
	Ingo Molnar, Peter Zijlstra, Juri Lelli, Vincent Guittot,
	Lorenzo Stoakes
  Cc: Ye Liu, Liam R. Howlett, Vlastimil Babka, Mike Rapoport,
	Suren Baghdasaryan, Michal Hocko, Steven Rostedt,
	Mathieu Desnoyers, Lai Jiangshan, Zqiang, Dietmar Eggemann,
	Ben Segall, Mel Gorman, Valentin Schneider, Zi Yan, Baolin Wang,
	Nico Pache, Ryan Roberts, Dev Jain, Barry Song, Kemeng Shi,
	Kairui Song, Nhat Pham, Baoquan He, Chris Li, linux-mm,
	linux-kernel, rcu

From: Ye Liu <liuye@kylinos.cn>

Introduce two helper macros in include/linux/mm.h and use them to
replace the open-coded (20 - PAGE_SHIFT) shifts repeated across
several subsystems:
- MB_TO_PAGES(mb)    converts megabytes to a page count
- PAGES_TO_MB(pages) converts a page count to megabytes

No functional change.
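
For example, with 4 KiB pages (PAGE_SHIFT == 12) the shift is
20 - 12 == 8, so MB_TO_PAGES(512) == 512 << 8 == 131072 pages and
PAGES_TO_MB(131072) == 512. A minimal usage sketch (illustration only,
assuming 4 KiB pages; not part of the diff below):

	unsigned long pages = MB_TO_PAGES(100);   /* 100 MB -> 25600 pages  */
	unsigned long mb    = PAGES_TO_MB(pages); /* 25600 pages -> 100 MB  */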

Signed-off-by: Ye Liu <liuye@kylinos.cn>
---
 include/linux/mm.h    | 9 +++++++++
 kernel/rcu/rcuscale.c | 2 +-
 kernel/sched/fair.c   | 5 ++---
 mm/backing-dev.c      | 2 +-
 mm/huge_memory.c      | 2 +-
 mm/swap.c             | 2 +-
 6 files changed, 15 insertions(+), 7 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 957acde6ae62..0c1b2c074142 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -69,6 +69,15 @@ static inline void totalram_pages_add(long count)
 
 extern void * high_memory;
 
+/*
+ * Convert between pages and MB
+ * 20 is the shift for 1MB (2^20 = 1MB)
+ * PAGE_SHIFT is the shift for page size (e.g., 12 for 4KB pages)
+ * So (20 - PAGE_SHIFT) converts between pages and MB
+ */
+#define PAGES_TO_MB(pages) ((pages) >> (20 - PAGE_SHIFT))
+#define MB_TO_PAGES(mb)    ((mb) << (20 - PAGE_SHIFT))
+
 #ifdef CONFIG_SYSCTL
 extern int sysctl_legacy_va_layout;
 #else
diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c
index b521d0455992..7484d8ad5767 100644
--- a/kernel/rcu/rcuscale.c
+++ b/kernel/rcu/rcuscale.c
@@ -796,7 +796,7 @@ kfree_scale_thread(void *arg)
 		pr_alert("Total time taken by all kfree'ers: %llu ns, loops: %d, batches: %ld, memory footprint: %lldMB\n",
 		       (unsigned long long)(end_time - start_time), kfree_loops,
 		       rcuscale_seq_diff(b_rcu_gp_test_finished, b_rcu_gp_test_started),
-		       (mem_begin - mem_during) >> (20 - PAGE_SHIFT));
+		       PAGES_TO_MB(mem_begin - mem_during));
 
 		if (shutdown) {
 			smp_mb(); /* Assign before wake. */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b9b4bbbf0af6..ae1d9a7ef202 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1489,7 +1489,7 @@ static unsigned int task_nr_scan_windows(struct task_struct *p)
 	 * by the PTE scanner and NUMA hinting faults should be trapped based
 	 * on resident pages
 	 */
-	nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
+	nr_scan_pages = MB_TO_PAGES(sysctl_numa_balancing_scan_size);
 	rss = get_mm_rss(p->mm);
 	if (!rss)
 		rss = nr_scan_pages;
@@ -1926,8 +1926,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct folio *folio,
 		}
 
 		def_th = sysctl_numa_balancing_hot_threshold;
-		rate_limit = sysctl_numa_balancing_promote_rate_limit << \
-			(20 - PAGE_SHIFT);
+		rate_limit = MB_TO_PAGES(sysctl_numa_balancing_promote_rate_limit);
 		numa_promotion_adjust_threshold(pgdat, rate_limit, def_th);
 
 		th = pgdat->nbp_threshold ? : def_th;
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 783904d8c5ef..e4d578e6121c 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -510,7 +510,7 @@ static void wb_update_bandwidth_workfn(struct work_struct *work)
 /*
  * Initial write bandwidth: 100 MB/s
  */
-#define INIT_BW		(100 << (20 - PAGE_SHIFT))
+#define INIT_BW		MB_TO_PAGES(100)
 
 static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
 		   gfp_t gfp)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 389620c65a5f..dcc33d9c300f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -911,7 +911,7 @@ static int __init hugepage_init(void)
 	 * where the extra memory used could hurt more than TLB overhead
 	 * is likely to save.  The admin can still enable it through /sys.
 	 */
-	if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
+	if (totalram_pages() < MB_TO_PAGES(512)) {
 		transparent_hugepage_flags = 0;
 		return 0;
 	}
diff --git a/mm/swap.c b/mm/swap.c
index 3632dd061beb..cb164f9ef9e3 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -1096,7 +1096,7 @@ static const struct ctl_table swap_sysctl_table[] = {
  */
 void __init swap_setup(void)
 {
-	unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);
+	unsigned long megs = PAGES_TO_MB(totalram_pages());
 
 	/* Use a smaller cluster for small-memory machines */
 	if (megs < 16)
-- 
2.43.0


