From: Shivank Garg <shivankg@amd.com>
To: <akpm@linux-foundation.org>, <david@kernel.org>
Cc: <kinseyho@google.com>, <weixugc@google.com>, <ljs@kernel.org>,
<Liam.Howlett@oracle.com>, <vbabka@kernel.org>,
<willy@infradead.org>, <rppt@kernel.org>, <surenb@google.com>,
<mhocko@suse.com>, <ziy@nvidia.com>, <matthew.brost@intel.com>,
<joshua.hahnjy@gmail.com>, <rakie.kim@sk.com>, <byungchul@sk.com>,
<gourry@gourry.net>, <ying.huang@linux.alibaba.com>,
<apopple@nvidia.com>, <dave@stgolabs.net>,
<Jonathan.Cameron@huawei.com>, <rkodsara@amd.com>,
<vkoul@kernel.org>, <bharata@amd.com>, <sj@kernel.org>,
<rientjes@google.com>, <xuezhengchu@huawei.com>,
<yiannis@zptcorp.com>, <dave.hansen@intel.com>,
<hannes@cmpxchg.org>, <jhubbard@nvidia.com>, <peterx@redhat.com>,
<riel@surriel.com>, <shakeel.butt@linux.dev>,
<stalexan@redhat.com>, <tj@kernel.org>, <nifan.cxl@gmail.com>,
<jic23@kernel.org>, <aneesh.kumar@kernel.org>,
<nathan.lynch@amd.com>, <Frank.li@nxp.com>, <djbw@kernel.org>,
<linux-kernel@vger.kernel.org>, <linux-mm@kvack.org>,
Shivank Garg <shivankg@amd.com>
Subject: [PATCH 4/7] mm/migrate: add batch-copy path in migrate_pages_batch
Date: Tue, 28 Apr 2026 15:50:45 +0000 [thread overview]
Message-ID: <20260428155043.39251-10-shivankg@amd.com> (raw)
In-Reply-To: <20260428155043.39251-2-shivankg@amd.com>
Add folios_mc_copy(), which walks the lists of src and dst folios in
lockstep and copies each folio's contents via folio_mc_copy(). The
folios_cnt parameter is unused here, but is part of the offload_copy
callback signature used by later patches in the series.
Split unmapped folios into batch-eligible (unmap_batch/dst_batch) and
standard (unmap_single/dst_single) lists, gated by the
migrate_offload_enabled static key, which is off by default. So, when
no offload driver is active, the branch is never taken and everything
goes through the standard path.
After TLB flush, batch copy the eligible folios via folios_mc_copy()
and pass already_copied=true into migrate_folios_move() so
__migrate_folio() skips the per-folio copy.
On batch copy failure, the already_copied flag stays false and each
folio falls back to an individual copy.
Signed-off-by: Shivank Garg <shivankg@amd.com>
---
include/linux/mm.h | 2 ++
mm/migrate.c | 61 +++++++++++++++++++++++++++++++++++-----------
mm/util.c | 30 +++++++++++++++++++++++
3 files changed, 79 insertions(+), 14 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0b776907152e..e6ab9bc3de8f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1917,6 +1917,8 @@ void __folio_put(struct folio *folio);
void split_page(struct page *page, unsigned int order);
void folio_copy(struct folio *dst, struct folio *src);
int folio_mc_copy(struct folio *dst, struct folio *src);
+int folios_mc_copy(struct list_head *dst_list, struct list_head *src_list,
+ unsigned int __always_unused folios_cnt);
unsigned long nr_free_buffer_pages(void);
diff --git a/mm/migrate.c b/mm/migrate.c
index c493e67e359d..6c2f1cb66f96 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -43,6 +43,7 @@
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>
#include <linux/pagewalk.h>
+#include <linux/jump_label.h>
#include <asm/tlbflush.h>
@@ -51,6 +52,8 @@
#include "internal.h"
#include "swap.h"
+DEFINE_STATIC_KEY_FALSE(migrate_offload_enabled);
+
static const struct movable_operations *offline_movable_ops;
static const struct movable_operations *zsmalloc_movable_ops;
@@ -1724,6 +1727,12 @@ static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
return nr_failed;
}
+/* movable_ops folios have their own migrate path */
+static bool folio_supports_batch_copy(struct folio *folio)
+{
+ return likely(!page_has_movable_ops(&folio->page));
+}
+
static void migrate_folios_move(struct list_head *src_folios,
struct list_head *dst_folios,
free_folio_t put_new_folio, unsigned long private,
@@ -1752,7 +1761,7 @@ static void migrate_folios_move(struct list_head *src_folios,
/*
* The rules are:
* 0: folio will be freed
- * -EAGAIN: stay on the unmap_folios list
+ * -EAGAIN: stay on the src_folios list
* Other errno: put on ret_folios list
*/
switch (rc) {
@@ -1823,8 +1832,12 @@ static int migrate_pages_batch(struct list_head *from,
bool is_large = false;
struct folio *folio, *folio2, *dst = NULL;
int rc, rc_saved = 0, nr_pages;
- LIST_HEAD(unmap_folios);
- LIST_HEAD(dst_folios);
+ unsigned int nr_batch = 0;
+ bool batch_copied = false;
+ LIST_HEAD(unmap_batch);
+ LIST_HEAD(dst_batch);
+ LIST_HEAD(unmap_single);
+ LIST_HEAD(dst_single);
bool nosplit = (reason == MR_NUMA_MISPLACED);
VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
@@ -1919,8 +1932,8 @@ static int migrate_pages_batch(struct list_head *from,
private, folio, &dst, mode, ret_folios);
/*
* The rules are:
- * 0: folio will be put on unmap_folios list,
- * dst folio put on dst_folios list
+ * 0: folio put on unmap_batch or unmap_single,
+ * dst folio put on dst_batch or dst_single
* -EAGAIN: stay on the from list
* -ENOMEM: stay on the from list
* Other errno: put on ret_folios list
@@ -1961,7 +1974,7 @@ static int migrate_pages_batch(struct list_head *from,
/* nr_failed isn't updated for not used */
stats->nr_thp_failed += thp_retry;
rc_saved = rc;
- if (list_empty(&unmap_folios))
+ if (list_empty(&unmap_batch) && list_empty(&unmap_single))
goto out;
else
goto move;
@@ -1971,8 +1984,15 @@ static int migrate_pages_batch(struct list_head *from,
nr_retry_pages += nr_pages;
break;
case 0:
- list_move_tail(&folio->lru, &unmap_folios);
- list_add_tail(&dst->lru, &dst_folios);
+ if (static_branch_unlikely(&migrate_offload_enabled) &&
+ folio_supports_batch_copy(folio)) {
+ list_move_tail(&folio->lru, &unmap_batch);
+ list_add_tail(&dst->lru, &dst_batch);
+ nr_batch++;
+ } else {
+ list_move_tail(&folio->lru, &unmap_single);
+ list_add_tail(&dst->lru, &dst_single);
+ }
break;
default:
/*
@@ -1995,17 +2015,28 @@ static int migrate_pages_batch(struct list_head *from,
/* Flush TLBs for all unmapped folios */
try_to_unmap_flush();
+ /* Batch-copy eligible folios before the move phase */
+ if (!list_empty(&unmap_batch)) {
+ rc = folios_mc_copy(&dst_batch, &unmap_batch, nr_batch);
+ batch_copied = (rc == 0);
+ }
+
retry = 1;
for (pass = 0; pass < nr_pass && retry; pass++) {
retry = 0;
thp_retry = 0;
nr_retry_pages = 0;
- /* Move the unmapped folios */
- migrate_folios_move(&unmap_folios, &dst_folios,
- put_new_folio, private, mode, reason,
- ret_folios, stats, &retry, &thp_retry,
- &nr_failed, &nr_retry_pages, false);
+ if (!list_empty(&unmap_batch))
+ migrate_folios_move(&unmap_batch, &dst_batch, put_new_folio,
+ private, mode, reason, ret_folios, stats,
+ &retry, &thp_retry, &nr_failed,
+ &nr_retry_pages, batch_copied);
+ if (!list_empty(&unmap_single))
+ migrate_folios_move(&unmap_single, &dst_single, put_new_folio,
+ private, mode, reason, ret_folios, stats,
+ &retry, &thp_retry, &nr_failed,
+ &nr_retry_pages, false);
}
nr_failed += retry;
stats->nr_thp_failed += thp_retry;
@@ -2014,7 +2045,9 @@ static int migrate_pages_batch(struct list_head *from,
rc = rc_saved ? : nr_failed;
out:
/* Cleanup remaining folios */
- migrate_folios_undo(&unmap_folios, &dst_folios,
+ migrate_folios_undo(&unmap_batch, &dst_batch,
+ put_new_folio, private, ret_folios);
+ migrate_folios_undo(&unmap_single, &dst_single,
put_new_folio, private, ret_folios);
return rc;
diff --git a/mm/util.c b/mm/util.c
index 232c3930a662..77eeb285def1 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -778,6 +778,36 @@ int folio_mc_copy(struct folio *dst, struct folio *src)
}
EXPORT_SYMBOL(folio_mc_copy);
+/**
+ * folios_mc_copy - Copy the contents of list of folios.
+ * @dst_list: destination folio list.
+ * @src_list: source folio list.
+ * @folios_cnt: unused here, present for callback signature compatibility.
+ *
+ * Walks list of src and dst folios in lockstep and copies folio
+ * content via folio_mc_copy(). The caller must ensure both lists have
+ * the same number of entries. This may sleep.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int folios_mc_copy(struct list_head *dst_list, struct list_head *src_list,
+ unsigned int __always_unused folios_cnt)
+{
+ struct folio *src, *dst;
+ int ret;
+
+ dst = list_first_entry(dst_list, struct folio, lru);
+ list_for_each_entry(src, src_list, lru) {
+ ret = folio_mc_copy(dst, src);
+ if (ret)
+ return ret;
+ dst = list_next_entry(dst, lru);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(folios_mc_copy);
+
int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
static int sysctl_overcommit_ratio __read_mostly = 50;
static unsigned long sysctl_overcommit_kbytes __read_mostly;
--
2.43.0
next prev parent reply other threads:[~2026-04-28 15:53 UTC|newest]
Thread overview: 13+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-28 15:50 [PATCH 0/7] Accelerate page migration with batch copying and hardware offload Shivank Garg
2026-04-28 15:50 ` [PATCH 1/7] mm/migrate: rename PAGE_ migration flags to FOLIO_ Shivank Garg
2026-04-30 9:07 ` Huang, Ying
2026-04-28 15:50 ` [PATCH 2/7] mm/migrate: use migrate_info field instead of private Shivank Garg
2026-04-28 15:50 ` [PATCH 3/7] mm/migrate: skip data copy for already-copied folios Shivank Garg
2026-04-28 15:50 ` Shivank Garg [this message]
2026-04-28 15:50 ` [PATCH 5/7] mm/migrate: add copy offload registration infrastructure Shivank Garg
2026-04-28 15:50 ` [PATCH 6/7] drivers/migrate_offload: add DMA batch copy driver (dcbm) Shivank Garg
2026-04-28 15:50 ` [PATCH 7/7] mm/migrate: adjust NR_MAX_BATCHED_MIGRATION for testing Shivank Garg
2026-04-28 17:11 ` [PATCH 0/7] Accelerate page migration with batch copying and hardware offload Garg, Shivank
2026-04-28 19:33 ` David Hildenbrand (Arm)
2026-04-29 5:51 ` Garg, Shivank
2026-04-30 8:47 ` Huang, Ying
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260428155043.39251-10-shivankg@amd.com \
--to=shivankg@amd.com \
--cc=Frank.li@nxp.com \
--cc=Jonathan.Cameron@huawei.com \
--cc=Liam.Howlett@oracle.com \
--cc=akpm@linux-foundation.org \
--cc=aneesh.kumar@kernel.org \
--cc=apopple@nvidia.com \
--cc=bharata@amd.com \
--cc=byungchul@sk.com \
--cc=dave.hansen@intel.com \
--cc=dave@stgolabs.net \
--cc=david@kernel.org \
--cc=djbw@kernel.org \
--cc=gourry@gourry.net \
--cc=hannes@cmpxchg.org \
--cc=jhubbard@nvidia.com \
--cc=jic23@kernel.org \
--cc=joshua.hahnjy@gmail.com \
--cc=kinseyho@google.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=ljs@kernel.org \
--cc=matthew.brost@intel.com \
--cc=mhocko@suse.com \
--cc=nathan.lynch@amd.com \
--cc=nifan.cxl@gmail.com \
--cc=peterx@redhat.com \
--cc=rakie.kim@sk.com \
--cc=riel@surriel.com \
--cc=rientjes@google.com \
--cc=rkodsara@amd.com \
--cc=rppt@kernel.org \
--cc=shakeel.butt@linux.dev \
--cc=sj@kernel.org \
--cc=stalexan@redhat.com \
--cc=surenb@google.com \
--cc=tj@kernel.org \
--cc=vbabka@kernel.org \
--cc=vkoul@kernel.org \
--cc=weixugc@google.com \
--cc=willy@infradead.org \
--cc=xuezhengchu@huawei.com \
--cc=yiannis@zptcorp.com \
--cc=ying.huang@linux.alibaba.com \
--cc=ziy@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox