From: Bijan Tabatabai <bijan311@gmail.com>
To: damon@lists.linux.dev, linux-mm@kvack.org,
linux-kernel@vger.kernel.org, linux-doc@vger.kernel.org
Cc: sj@kernel.org, akpm@linux-foundation.org, corbet@lwn.net,
bijantabatab@micron.com, venkataravis@micron.com,
emirakhur@micron.com, vtavarespetr@micron.com,
ajayjoshi@micron.com,
Ravi Shankar Jonnalagadda <ravis.opensrc@micron.com>
Subject: [PATCH v4 09/13] mm/damon/vaddr: Add vaddr versions of migrate_{hot,cold}
Date: Tue, 8 Jul 2025 19:59:39 -0500 [thread overview]
Message-ID: <20250709005952.17776-10-bijan311@gmail.com> (raw)
In-Reply-To: <20250709005952.17776-1-bijan311@gmail.com>
From: Bijan Tabatabai <bijantabatab@micron.com>
migrate_{hot,cold} are paddr schemes that are used to migrate hot/cold
data to a specified node. However, these schemes are only available when
doing physical address monitoring. This patch adds an implementation of
them for virtual address monitoring as well.
Co-developed-by: Ravi Shankar Jonnalagadda <ravis.opensrc@micron.com>
Signed-off-by: Ravi Shankar Jonnalagadda <ravis.opensrc@micron.com>
Signed-off-by: Bijan Tabatabai <bijantabatab@micron.com>
---
mm/damon/vaddr.c | 98 ++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 98 insertions(+)
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 46554e49a478..b244ac056416 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -15,6 +15,7 @@
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>
+#include "../internal.h"
#include "ops-common.h"
#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
@@ -610,6 +611,68 @@ static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
return max_nr_accesses;
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static int damos_va_migrate_pmd_entry(pmd_t *pmd, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ struct list_head *migration_list = walk->private;
+ struct folio *folio;
+ spinlock_t *ptl;
+ pmd_t pmde;
+
+ ptl = pmd_lock(walk->mm, pmd);
+ pmde = pmdp_get(pmd);
+
+ if (!pmd_present(pmde) || !pmd_trans_huge(pmde))
+ goto unlock;
+
+ /* Tell page walk code to not split the PMD */
+ walk->action = ACTION_CONTINUE;
+
+ folio = damon_get_folio(pmd_pfn(pmde));
+ if (!folio)
+ goto unlock;
+
+ if (!folio_isolate_lru(folio))
+ goto put_folio;
+
+ list_add(&folio->lru, migration_list);
+
+put_folio:
+ folio_put(folio);
+unlock:
+ spin_unlock(ptl);
+ return 0;
+}
+#else
+#define damos_va_migrate_pmd_entry NULL
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+static int damos_va_migrate_pte_entry(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ struct list_head *migration_list = walk->private;
+ struct folio *folio;
+ pte_t ptent;
+
+ ptent = ptep_get(pte);
+ if (pte_none(ptent) || !pte_present(ptent))
+ return 0;
+
+ folio = damon_get_folio(pte_pfn(ptent));
+ if (!folio)
+ return 0;
+
+ if (!folio_isolate_lru(folio))
+ goto out;
+
+ list_add(&folio->lru, migration_list);
+
+out:
+ folio_put(folio);
+ return 0;
+}
+
/*
* Functions for the target validity check and cleanup
*/
@@ -653,6 +716,34 @@ static unsigned long damos_madvise(struct damon_target *target,
}
#endif /* CONFIG_ADVISE_SYSCALLS */
+static unsigned long damos_va_migrate(struct damon_target *target,
+ struct damon_region *r, struct damos *s,
+ unsigned long *sz_filter_passed)
+{
+ LIST_HEAD(folio_list);
+ struct mm_struct *mm;
+ unsigned long applied = 0;
+ struct mm_walk_ops walk_ops = {
+ .pmd_entry = damos_va_migrate_pmd_entry,
+ .pte_entry = damos_va_migrate_pte_entry,
+ .walk_lock = PGWALK_RDLOCK,
+ };
+
+ mm = damon_get_mm(target);
+ if (!mm)
+ return 0;
+
+ mmap_read_lock(mm);
+ walk_page_range(mm, r->ar.start, r->ar.end, &walk_ops, &folio_list);
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ applied = damon_migrate_pages(&folio_list, s->target_nid);
+ cond_resched();
+
+ return applied * PAGE_SIZE;
+}
+
static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
struct damon_target *t, struct damon_region *r,
struct damos *scheme, unsigned long *sz_filter_passed)
@@ -675,6 +766,9 @@ static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
case DAMOS_NOHUGEPAGE:
madv_action = MADV_NOHUGEPAGE;
break;
+ case DAMOS_MIGRATE_HOT:
+ case DAMOS_MIGRATE_COLD:
+ return damos_va_migrate(t, r, scheme, sz_filter_passed);
case DAMOS_STAT:
return 0;
default:
@@ -695,6 +789,10 @@ static int damon_va_scheme_score(struct damon_ctx *context,
switch (scheme->action) {
case DAMOS_PAGEOUT:
return damon_cold_score(context, r, scheme);
+ case DAMOS_MIGRATE_HOT:
+ return damon_hot_score(context, r, scheme);
+ case DAMOS_MIGRATE_COLD:
+ return damon_cold_score(context, r, scheme);
default:
break;
}
--
2.43.0
next prev parent reply other threads:[~2025-07-09 1:01 UTC|newest]
Thread overview: 16+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-07-09 0:59 [PATCH v4 00/13] mm/damon/vaddr: Allow interleaving in migrate_{hot,cold} actions Bijan Tabatabai
2025-07-09 0:59 ` [PATCH v4 01/13] mm/damon: add struct damos_migrate_dests Bijan Tabatabai
2025-07-09 0:59 ` [PATCH v4 02/13] mm/damon/core: add damos->migrate_dests field Bijan Tabatabai
2025-07-09 0:59 ` [PATCH v4 03/13] mm/damon/sysfs-schemes: implement DAMOS action destinations directory Bijan Tabatabai
2025-07-09 0:59 ` [PATCH v4 04/13] mm/damon/sysfs-schemes: set damos->migrate_dests Bijan Tabatabai
2025-07-09 0:59 ` [PATCH v4 05/13] Docs/ABI/damon: document schemes dests directory Bijan Tabatabai
2025-07-09 0:59 ` [PATCH v4 06/13] Docs/admin-guide/mm/damon/usage: document " Bijan Tabatabai
2025-07-09 0:59 ` [PATCH v4 07/13] mm/damon/core: Commit damos->migrate_dests Bijan Tabatabai
2025-07-09 0:59 ` [PATCH v4 08/13] mm/damon: Move migration helpers from paddr to ops-common Bijan Tabatabai
2025-07-09 0:59 ` Bijan Tabatabai [this message]
2025-07-09 0:59 ` [PATCH v4 10/13] Docs/mm/damon/design: Document vaddr support for migrate_{hot,cold} Bijan Tabatabai
2025-07-09 0:59 ` [PATCH v4 11/13] mm/damon/vaddr: Use damos->migrate_dests in migrate_{hot,cold} Bijan Tabatabai
2025-07-09 0:59 ` [PATCH v4 12/13] mm/damon: Move folio filtering from paddr to ops-common Bijan Tabatabai
2025-07-09 0:59 ` [PATCH v4 13/13] mm/damon/vaddr: Apply filters in migrate_{hot/cold} Bijan Tabatabai
2025-07-09 11:06 ` [PATCH v4 00/13] mm/damon/vaddr: Allow interleaving in migrate_{hot,cold} actions SeongJae Park
2025-07-09 14:07 ` Bijan Tabatabai
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250709005952.17776-10-bijan311@gmail.com \
--to=bijan311@gmail.com \
--cc=ajayjoshi@micron.com \
--cc=akpm@linux-foundation.org \
--cc=bijantabatab@micron.com \
--cc=corbet@lwn.net \
--cc=damon@lists.linux.dev \
--cc=emirakhur@micron.com \
--cc=linux-doc@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=ravis.opensrc@micron.com \
--cc=sj@kernel.org \
--cc=venkataravis@micron.com \
--cc=vtavarespetr@micron.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).