From: Raghavendra K T <raghavendra.kt@amd.com>
To: <raghavendra.kt@amd.com>
Cc: <AneeshKumar.KizhakeVeetil@arm.com>, <Michael.Day@amd.com>,
<akpm@linux-foundation.org>, <bharata@amd.com>,
<dave.hansen@intel.com>, <david@redhat.com>,
<dongjoo.linux.dev@gmail.com>, <feng.tang@intel.com>,
<gourry@gourry.net>, <hannes@cmpxchg.org>, <honggyu.kim@sk.com>,
<hughd@google.com>, <jhubbard@nvidia.com>, <jon.grimm@amd.com>,
<k.shutemov@gmail.com>, <kbusch@meta.com>,
<kmanaouil.dev@gmail.com>, <leesuyeon0506@gmail.com>,
<leillc@google.com>, <liam.howlett@oracle.com>,
<linux-kernel@vger.kernel.org>, <linux-mm@kvack.org>,
<mgorman@techsingularity.net>, <mingo@redhat.com>,
<nadav.amit@gmail.com>, <nphamcs@gmail.com>,
<peterz@infradead.org>, <riel@surriel.com>, <rientjes@google.com>,
<rppt@kernel.org>, <santosh.shukla@amd.com>, <shivankg@amd.com>,
<shy828301@gmail.com>, <sj@kernel.org>, <vbabka@suse.cz>,
<weixugc@google.com>, <willy@infradead.org>,
<ying.huang@linux.alibaba.com>, <ziy@nvidia.com>,
<Jonathan.Cameron@huawei.com>, <dave@stgolabs.net>,
<yuanchu@google.com>, <kinseyho@google.com>, <hdanton@sina.com>,
<harry.yoo@oracle.com>
Subject: [RFC PATCH V3 08/17] mm: Add throttling of mm scanning using scan_size
Date: Thu, 14 Aug 2025 15:32:58 +0000
Message-ID: <20250814153307.1553061-9-raghavendra.kt@amd.com>
In-Reply-To: <20250814153307.1553061-1-raghavendra.kt@amd.com>
Before this patch, scanning covered the entire virtual address space of
all the tasks. Now the scan size is shrunk or expanded based on the
number of useful pages found in the last scan.
This helps to back off quickly from unnecessary scanning and thus burn
less CPU.
Drawback: if a useful chunk sits at the far end of the VMA address
space, its scanning and migration are delayed.
Shrink/expand algorithm for scan_size:
X : Number of useful pages found in the last scan.
Y : Number of useful pages found in the current scan.
Initial scan_size is 1GB.
case 1: (X = 0, Y = 0)
Halve scan_size.
case 2: (X = 0, Y > 0)
Aggressively change to MAX (4GB).
case 3: (X > 0, Y = 0)
No change.
case 4: (X > 0, Y > 0)
Double scan_size.
scan_size is clamped between MIN (256MB) and MAX (4GB).
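To make the shrink/expand behavior concrete, here is a minimal userspace
sketch that simulates the tuning over a few scans (the constants mirror
the patch, but the function and trace are illustrative only, not the
kernel code):

#include <stdio.h>

#define SCAN_SIZE_MIN (256UL << 20)	/* 256MB */
#define SCAN_SIZE_MAX (4UL << 30)	/* 4GB */
#define SCAN_SIZE_CHANGE_SHIFT 1

static unsigned long clamp_size(unsigned long v)
{
	if (v < SCAN_SIZE_MIN)
		return SCAN_SIZE_MIN;
	if (v > SCAN_SIZE_MAX)
		return SCAN_SIZE_MAX;
	return v;
}

/* x: useful pages in the previous scan, y: useful pages in this scan */
static unsigned long tune_scan_size(unsigned long scan_size,
				    unsigned long x, unsigned long y)
{
	if (!x && !y)			/* case 1: halve */
		scan_size >>= SCAN_SIZE_CHANGE_SHIFT;
	else if (!x && y)		/* case 2: jump to MAX */
		scan_size = SCAN_SIZE_MAX;
	else if (x && y)		/* case 4: double */
		scan_size <<= SCAN_SIZE_CHANGE_SHIFT;
	/* case 3 (x && !y): no change */

	return clamp_size(scan_size);
}

int main(void)
{
	unsigned long scan_size = 1UL << 30;	/* initial 1GB */
	unsigned long deltas[] = { 0, 0, 0, 500, 800, 0, 0 };
	unsigned long last = 0;

	for (unsigned int i = 0; i < sizeof(deltas) / sizeof(deltas[0]); i++) {
		scan_size = tune_scan_size(scan_size, last, deltas[i]);
		printf("scan %u: useful=%lu scan_size=%luMB\n",
		       i, deltas[i], scan_size >> 20);
		last = deltas[i];
	}
	return 0;
}

In this trace the size decays from 1GB toward MIN across three empty
scans, snaps to MAX on the first productive scan after the idle stretch
(case 2), stays at MAX while scans keep finding pages, is left unchanged
by the first empty scan after a productive one (case 3), and only starts
halving again once empty scans repeat.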
TBD: Tuning based on real workloads
Signed-off-by: Raghavendra K T <raghavendra.kt@amd.com>
---
mm/kscand.c | 29 +++++++++++++++++++++++++++++
1 file changed, 29 insertions(+)
diff --git a/mm/kscand.c b/mm/kscand.c
index 843069048c61..39a7fcef7de8 100644
--- a/mm/kscand.c
+++ b/mm/kscand.c
@@ -28,10 +28,15 @@
static struct task_struct *kscand_thread __read_mostly;
static DEFINE_MUTEX(kscand_mutex);
+
/*
* Total VMA size to cover during scan.
+ * Min: 256MB, default: 1GB, max: 4GB
*/
+#define KSCAND_SCAN_SIZE_MIN (256 * 1024 * 1024UL)
+#define KSCAND_SCAN_SIZE_MAX (4UL * 1024 * 1024 * 1024)
#define KSCAND_SCAN_SIZE (1 * 1024 * 1024 * 1024UL)
+
static unsigned long kscand_scan_size __read_mostly = KSCAND_SCAN_SIZE;
/*
@@ -94,6 +99,8 @@ struct kscand_mm_slot {
unsigned long next_scan;
/* Tracks how many useful pages obtained for migration in the last scan */
unsigned long scan_delta;
+ /* Determines how much VMA address space to be covered in the scanning */
+ unsigned long scan_size;
long address;
bool is_scanned;
};
@@ -746,6 +753,8 @@ static void kmigrated_migrate_folio(void)
*/
#define KSCAND_IGNORE_SCAN_THR 256
+#define SCAN_SIZE_CHANGE_SHIFT 1
+
/* Maintains stability of scan_period by decaying last time accessed pages */
#define SCAN_DECAY_SHIFT 4
/*
@@ -761,14 +770,26 @@ static void kmigrated_migrate_folio(void)
* Increase scan_period by (2 << SCAN_PERIOD_CHANGE_SCALE).
* case 4: (X > 0, Y > 0)
* Decrease scan_period by SCAN_PERIOD_TUNE_PERCENT.
+ * Tuning scan_size:
+ * Initial scan_size is 1GB
+ * case 1: (X = 0, Y = 0)
+ * Halve scan_size (shift right by SCAN_SIZE_CHANGE_SHIFT).
+ * case 2: (X = 0, Y > 0)
+ * scan_size = KSCAND_SCAN_SIZE_MAX
+ * case 3: (X > 0, Y = 0)
+ * No change
+ * case 4: (X > 0, Y > 0)
+ * Double scan_size (shift left by SCAN_SIZE_CHANGE_SHIFT).
*/
static inline void kscand_update_mmslot_info(struct kscand_mm_slot *mm_slot,
unsigned long total)
{
unsigned int scan_period;
unsigned long now;
+ unsigned long scan_size;
unsigned long old_scan_delta;
+ scan_size = mm_slot->scan_size;
scan_period = mm_slot->scan_period;
old_scan_delta = mm_slot->scan_delta;
@@ -789,20 +810,25 @@ static inline void kscand_update_mmslot_info(struct kscand_mm_slot *mm_slot,
if (!old_scan_delta && !total) {
scan_period = (100 + SCAN_PERIOD_TUNE_PERCENT) * scan_period;
scan_period /= 100;
+ scan_size = scan_size >> SCAN_SIZE_CHANGE_SHIFT;
} else if (old_scan_delta && total) {
scan_period = (100 - SCAN_PERIOD_TUNE_PERCENT) * scan_period;
scan_period /= 100;
+ scan_size = scan_size << SCAN_SIZE_CHANGE_SHIFT;
} else if (old_scan_delta && !total) {
scan_period = scan_period << SCAN_PERIOD_CHANGE_SCALE;
} else {
scan_period = scan_period >> SCAN_PERIOD_CHANGE_SCALE;
+ scan_size = KSCAND_SCAN_SIZE_MAX;
}
scan_period = clamp(scan_period, KSCAND_SCAN_PERIOD_MIN, KSCAND_SCAN_PERIOD_MAX);
+ scan_size = clamp(scan_size, KSCAND_SCAN_SIZE_MIN, KSCAND_SCAN_SIZE_MAX);
now = jiffies;
mm_slot->next_scan = now + msecs_to_jiffies(scan_period);
mm_slot->scan_period = scan_period;
+ mm_slot->scan_size = scan_size;
mm_slot->scan_delta = total;
}
@@ -814,6 +840,7 @@ static unsigned long kscand_scan_mm_slot(void)
unsigned int mm_slot_scan_period;
unsigned long now;
unsigned long mm_slot_next_scan;
+ unsigned long mm_slot_scan_size;
unsigned long vma_scanned_size = 0;
unsigned long address;
unsigned long total = 0;
@@ -843,6 +870,7 @@ static unsigned long kscand_scan_mm_slot(void)
mm_slot->is_scanned = true;
mm_slot_next_scan = mm_slot->next_scan;
mm_slot_scan_period = mm_slot->scan_period;
+ mm_slot_scan_size = mm_slot->scan_size;
spin_unlock(&kscand_mm_lock);
if (unlikely(!mmap_read_trylock(mm)))
@@ -994,6 +1022,7 @@ void __kscand_enter(struct mm_struct *mm)
kscand_slot->address = 0;
kscand_slot->scan_period = kscand_mm_scan_period_ms;
+ kscand_slot->scan_size = kscand_scan_size;
kscand_slot->next_scan = 0;
kscand_slot->scan_delta = 0;
--
2.34.1