linux-mm.kvack.org archive mirror
From: Vern Hao <haoxing990@gmail.com>
To: mgorman@techsingularity.net
Cc: akpm@linux-foundation.org, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org, haoxing990@gmail.com,
	Xin Hao <vernhao@tencent.com>
Subject: [PATCH RFC v1 2/2] mm, pcp: add more detailed info about high-order page counts
Date: Mon, 15 Jan 2024 17:34:36 +0800
Message-ID: <20240115093437.87814-3-vernhao@tencent.com>
In-Reply-To: <20240115093437.87814-1-vernhao@tencent.com>

From: Xin Hao <vernhao@tencent.com>

With this patch, we can see how pages of each order are distributed on each
CPU's pcp lists, for example:
	# cat /proc/zoneinfo
    ....
    cpu: 2
              total_count: 14286
                  order0 : 1260
                  order1 : 13
                  order2 : 42
                  order3 : 4
                  order4 : 0
                  order5 : 0
                  order6 : 0
                  order7 : 0
                  order8 : 0
                  order9 : 25
                  order10: 0
                  order11: 0
                  order12: 0
              high:  14541
              batch: 63
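
The new per-order counters are intended to stay consistent with the existing
pcp->total_count field: total_count is kept in base pages, while count[order]
counts list entries of a given order. A minimal sketch of a consistency check
under that assumption (pcp_counts_consistent() is a hypothetical helper, not
part of this patch):

	/*
	 * Hypothetical debug helper, not part of this patch: total_count is
	 * kept in base pages, while count[order] counts list entries of that
	 * order, so total_count should equal the sum of count[order] << order.
	 * The index runs over NR_PCP_LISTS entries, mirroring the loop added
	 * to zoneinfo_show_print(); entries beyond the highest pcp order stay
	 * zero. Caller must hold pcp->lock.
	 */
	static bool pcp_counts_consistent(struct per_cpu_pages *pcp)
	{
		int order, pages = 0;

		for (order = 0; order < NR_PCP_LISTS; order++)
			pages += pcp->count[order] << order;

		return pages == pcp->total_count;
	}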

Signed-off-by: Xin Hao <vernhao@tencent.com>
---
 include/linux/mmzone.h |  1 +
 mm/page_alloc.c        |  4 ++++
 mm/vmstat.c            | 18 ++++++++++++------
 3 files changed, 17 insertions(+), 6 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 883168776fea..55d25b4f51e5 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -684,6 +684,7 @@ enum zone_watermarks {
 struct per_cpu_pages {
 	spinlock_t lock;	/* Protects lists field */
 	int total_count;	/* total number of pages in the list */
+	int count[NR_PCP_LISTS]; /* per-order page counts */
 	int high;		/* high watermark, emptying needed */
 	int high_min;		/* min high watermark */
 	int high_max;		/* max high watermark */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4e91e429b8d1..7ec2dc5c5ea5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1228,6 +1228,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			list_del(&page->pcp_list);
 			count -= nr_pages;
 			pcp->total_count -= nr_pages;
+			pcp->count[order] -= 1;
 
 			/* MIGRATE_ISOLATE page should not go to pcplists */
 			VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
@@ -2478,6 +2479,7 @@ static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
 	pindex = order_to_pindex(migratetype, order);
 	list_add(&page->pcp_list, &pcp->lists[pindex]);
 	pcp->total_count += 1 << order;
+	pcp->count[order] += 1;
 
 	batch = READ_ONCE(pcp->batch);
 	/*
@@ -2858,6 +2860,7 @@ struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
 					migratetype, alloc_flags);
 
 			pcp->total_count += alloced << order;
+			pcp->count[order] += alloced;
 			if (unlikely(list_empty(list)))
 				return NULL;
 		}
@@ -2865,6 +2868,7 @@ struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
 		page = list_first_entry(list, struct page, pcp_list);
 		list_del(&page->pcp_list);
 		pcp->total_count -= 1 << order;
+		pcp->count[order] -= 1;
 	} while (check_new_pages(page, order));
 
 	return page;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index c1e8096ff0a6..e04300ec450f 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1735,19 +1735,25 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
 
 	seq_printf(m, "\n  pagesets");
 	for_each_online_cpu(i) {
+		int j;
 		struct per_cpu_pages *pcp;
 		struct per_cpu_zonestat __maybe_unused *pzstats;
 
 		pcp = per_cpu_ptr(zone->per_cpu_pageset, i);
 		seq_printf(m,
 			   "\n    cpu: %i"
-			   "\n              count: %i"
-			   "\n              high:  %i"
-			   "\n              batch: %i",
+			   "\n              total_count: %i",
 			   i,
-			   pcp->total_count,
-			   pcp->high,
-			   pcp->batch);
+			   pcp->total_count);
+		for (j = 0; j < NR_PCP_LISTS; j++)
+			seq_printf(m,
+				   "\n                  order%-2i: %-3i",
+				   j, pcp->count[j]);
+		seq_printf(m,
+			   "\n              high:  %i"
+			   "\n              batch: %i",
+			   pcp->high,
+			   pcp->batch);
 #ifdef CONFIG_SMP
 		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, i);
 		seq_printf(m, "\n  vm stats threshold: %d",
-- 
2.31.1




Thread overview: 6+ messages
2024-01-15  9:34 [PATCH RFC v1 0/2] mm, pcp: add high order page info in /proc/zoneinfo Vern Hao
2024-01-15  9:34 ` [PATCH RFC v1 1/2] mm, pcp: rename pcp->count to pcp->total_count Vern Hao
2024-01-15  9:34 ` Vern Hao [this message]
2024-01-15 20:34   ` [PATCH RFC v1 2/2] mm, pcp: add more detailed info about high-order page counts David Rientjes
2024-01-16 14:31   ` Mel Gorman
2024-01-15 20:32 ` [PATCH RFC v1 0/2] mm, pcp: add high order page info in /proc/zoneinfo David Rientjes
