From: Shivank Garg <shivankg@amd.com>
To: <akpm@linux-foundation.org>, <david@kernel.org>
Cc: <kinseyho@google.com>, <weixugc@google.com>, <ljs@kernel.org>,
	<Liam.Howlett@oracle.com>, <vbabka@kernel.org>,
	<willy@infradead.org>, <rppt@kernel.org>, <surenb@google.com>,
	<mhocko@suse.com>, <ziy@nvidia.com>, <matthew.brost@intel.com>,
	<joshua.hahnjy@gmail.com>, <rakie.kim@sk.com>, <byungchul@sk.com>,
	<gourry@gourry.net>, <ying.huang@linux.alibaba.com>,
	<apopple@nvidia.com>, <dave@stgolabs.net>,
	<Jonathan.Cameron@huawei.com>, <rkodsara@amd.com>,
	<vkoul@kernel.org>, <bharata@amd.com>, <sj@kernel.org>,
	<rientjes@google.com>, <xuezhengchu@huawei.com>,
	<yiannis@zptcorp.com>, <dave.hansen@intel.com>,
	<hannes@cmpxchg.org>, <jhubbard@nvidia.com>, <peterx@redhat.com>,
	<riel@surriel.com>, <shakeel.butt@linux.dev>,
	<stalexan@redhat.com>, <tj@kernel.org>, <nifan.cxl@gmail.com>,
	<jic23@kernel.org>, <aneesh.kumar@kernel.org>,
	<nathan.lynch@amd.com>, <Frank.li@nxp.com>, <djbw@kernel.org>,
	<linux-kernel@vger.kernel.org>, <linux-mm@kvack.org>,
	Shivank Garg <shivankg@amd.com>, Mike Day <michael.day@amd.com>
Subject: [PATCH 5/7] mm/migrate: add copy offload registration infrastructure
Date: Tue, 28 Apr 2026 15:50:47 +0000
Message-ID: <20260428155043.39251-12-shivankg@amd.com>
In-Reply-To: <20260428155043.39251-2-shivankg@amd.com>

Add a registration interface that lets a single offload provider
(DMA engine, multi-threaded CPU copy, etc.) take over the batch folio
copy performed by migrate_pages_batch().

The provider fills in a struct migrator with an offload_copy()
callback and calls migrate_offload_register(). Registration patches
the migrate_offload_copy() static_call and enables the
migrate_offload_enabled static branch; migrate_offload_unregister()
reverts both.

Whether a migration reason is batch-copy eligible is decided by the
core in migrate_offload_do_batch(). A migrator may decline a
particular batch (e.g. when nr_batch is too small to amortize setup
cost) by returning -EOPNOTSUPP, in which case the move phase falls
back to the per-folio CPU copy.
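
For illustration, a provider's callback might look like the following
(a minimal sketch; my_copy_folios, my_submit_batch and MY_MIN_BATCH
are hypothetical names, not part of this series):

    static int my_copy_folios(struct list_head *dst_list,
                              struct list_head *src_list,
                              unsigned int folio_cnt)
    {
        /* Too few folios to amortize engine setup: decline the batch. */
        if (folio_cnt < MY_MIN_BATCH)
            return -EOPNOTSUPP;

        /* Submit the src/dst folio pairs to the copy engine and wait. */
        return my_submit_batch(dst_list, src_list, folio_cnt);
    }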

Only one migrator can be active at a time: a second registration
returns -EBUSY, and only the active migrator can unregister itself.
The static_call dispatch is protected by SRCU so that the
synchronize_srcu() in unregister waits for all in-flight copies to
complete before the module reference is dropped.
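
A provider module would then register and unregister the callback
roughly as follows (again a sketch with illustrative my_* names; the
real DMA provider, dcbm, is added later in this series):

    static struct migrator my_migrator = {
        .name         = "my_copier",
        .offload_copy = my_copy_folios,
        .owner        = THIS_MODULE,
    };

    static int __init my_init(void)
    {
        return migrate_offload_register(&my_migrator);
    }

    static void __exit my_exit(void)
    {
        /* Blocks in synchronize_srcu() until in-flight copies drain. */
        migrate_offload_unregister(&my_migrator);
    }

    module_init(my_init);
    module_exit(my_exit);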

Co-developed-by: Mike Day <michael.day@amd.com>
Signed-off-by: Mike Day <michael.day@amd.com>
Signed-off-by: Shivank Garg <shivankg@amd.com>
---
 include/linux/migrate_copy_offload.h | 44 +++++++++++++
 mm/Kconfig                           |  6 ++
 mm/Makefile                          |  1 +
 mm/migrate.c                         | 57 +++++++++++++++--
 mm/migrate_copy_offload.c            | 94 ++++++++++++++++++++++++++++
 5 files changed, 198 insertions(+), 4 deletions(-)
 create mode 100644 include/linux/migrate_copy_offload.h
 create mode 100644 mm/migrate_copy_offload.c

diff --git a/include/linux/migrate_copy_offload.h b/include/linux/migrate_copy_offload.h
new file mode 100644
index 000000000000..d68b10a84743
--- /dev/null
+++ b/include/linux/migrate_copy_offload.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MIGRATE_COPY_OFFLOAD_H
+#define _LINUX_MIGRATE_COPY_OFFLOAD_H
+
+#include <linux/errno.h>
+#include <linux/jump_label.h>
+#include <linux/srcu.h>
+#include <linux/types.h>
+
+struct list_head;
+struct module;
+
+#define MIGRATOR_NAME_LEN 32
+
+/**
+ * struct migrator - batch-copy provider for page migration.
+ * @name: name of the provider.
+ * @offload_copy: copy @folio_cnt folios from @src_list to @dst_list.
+ *
+ *	The migrator may inspect @folio_cnt to decide whether the batch
+ *	is worth offloading, e.g. skip batches too small to amortize
+ *	setup cost. If it returns an error, the core falls back to CPU copy.
+ *
+ * @owner: module providing the migrator.
+ */
+struct migrator {
+	char name[MIGRATOR_NAME_LEN];
+	int (*offload_copy)(struct list_head *dst_list,
+			    struct list_head *src_list,
+			    unsigned int folio_cnt);
+	struct module *owner;
+};
+
+#ifdef CONFIG_MIGRATION_COPY_OFFLOAD
+extern struct static_key_false migrate_offload_enabled;
+extern struct srcu_struct migrate_offload_srcu;
+int migrate_offload_register(struct migrator *m);
+int migrate_offload_unregister(struct migrator *m);
+#else
+static inline int migrate_offload_register(struct migrator *m) { return -EOPNOTSUPP; }
+static inline int migrate_offload_unregister(struct migrator *m) { return -EOPNOTSUPP; }
+#endif
+
+#endif /* _LINUX_MIGRATE_COPY_OFFLOAD_H */
diff --git a/mm/Kconfig b/mm/Kconfig
index e8bf1e9e6ad9..325d79619680 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -647,6 +647,12 @@ config MIGRATION
 config DEVICE_MIGRATION
 	def_bool MIGRATION && ZONE_DEVICE
 
+# Page-migration batch-copy offload infrastructure.
+# Selected by migrator drivers (e.g. CONFIG_DCBM_DMA).
+config MIGRATION_COPY_OFFLOAD
+	bool
+	depends on MIGRATION
+
 config ARCH_ENABLE_HUGEPAGE_MIGRATION
 	bool
 
diff --git a/mm/Makefile b/mm/Makefile
index 8ad2ab08244e..db1ac8097089 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -96,6 +96,7 @@ obj-$(CONFIG_FAILSLAB) += failslab.o
 obj-$(CONFIG_FAIL_PAGE_ALLOC) += fail_page_alloc.o
 obj-$(CONFIG_MEMTEST)		+= memtest.o
 obj-$(CONFIG_MIGRATION) += migrate.o
+obj-$(CONFIG_MIGRATION_COPY_OFFLOAD) += migrate_copy_offload.o
 obj-$(CONFIG_NUMA) += memory-tiers.o
 obj-$(CONFIG_DEVICE_MIGRATION) += migrate_device.o
 obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o khugepaged.o
diff --git a/mm/migrate.c b/mm/migrate.c
index 6c2f1cb66f96..9af070f9a1f2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -44,6 +44,8 @@
 #include <linux/memory-tiers.h>
 #include <linux/pagewalk.h>
 #include <linux/jump_label.h>
+#include <linux/static_call.h>
+#include <linux/migrate_copy_offload.h>
 
 #include <asm/tlbflush.h>
 
@@ -54,6 +56,51 @@
 
 DEFINE_STATIC_KEY_FALSE(migrate_offload_enabled);
 
+#ifdef CONFIG_MIGRATION_COPY_OFFLOAD
+DEFINE_SRCU(migrate_offload_srcu);
+DEFINE_STATIC_CALL(migrate_offload_copy, folios_mc_copy);
+
+static bool migrate_offload_do_batch(int reason)
+{
+	if (!static_branch_unlikely(&migrate_offload_enabled))
+		return false;
+
+	switch (reason) {
+	case MR_COMPACTION:
+	case MR_SYSCALL:
+	case MR_DEMOTION:
+	case MR_NUMA_MISPLACED:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static int migrate_offload_batch_copy(struct list_head *dst_batch,
+				      struct list_head *src_batch,
+				      unsigned int nr_batch)
+{
+	int idx, rc;
+
+	idx = srcu_read_lock(&migrate_offload_srcu);
+	rc = static_call(migrate_offload_copy)(dst_batch, src_batch, nr_batch);
+	srcu_read_unlock(&migrate_offload_srcu, idx);
+	return rc;
+}
+#else
+static bool migrate_offload_do_batch(int reason)
+{
+	return false;
+}
+
+static int migrate_offload_batch_copy(struct list_head *dst_batch,
+				      struct list_head *src_batch,
+				      unsigned int nr_batch)
+{
+	return -EOPNOTSUPP;
+}
+#endif
+
 static const struct movable_operations *offline_movable_ops;
 static const struct movable_operations *zsmalloc_movable_ops;
 
@@ -1833,7 +1880,7 @@ static int migrate_pages_batch(struct list_head *from,
 	struct folio *folio, *folio2, *dst = NULL;
 	int rc, rc_saved = 0, nr_pages;
 	unsigned int nr_batch = 0;
-	bool batch_copied = false;
+	bool do_batch = false, batch_copied = false;
 	LIST_HEAD(unmap_batch);
 	LIST_HEAD(dst_batch);
 	LIST_HEAD(unmap_single);
@@ -1843,6 +1890,8 @@ static int migrate_pages_batch(struct list_head *from,
 	VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
 			!list_empty(from) && !list_is_singular(from));
 
+	do_batch = migrate_offload_do_batch(reason);
+
 	for (pass = 0; pass < nr_pass && retry; pass++) {
 		retry = 0;
 		thp_retry = 0;
@@ -1984,8 +2033,7 @@ static int migrate_pages_batch(struct list_head *from,
 				nr_retry_pages += nr_pages;
 				break;
 			case 0:
-				if (static_branch_unlikely(&migrate_offload_enabled) &&
-				    folio_supports_batch_copy(folio)) {
+				if (do_batch && folio_supports_batch_copy(folio)) {
 					list_move_tail(&folio->lru, &unmap_batch);
 					list_add_tail(&dst->lru, &dst_batch);
 					nr_batch++;
@@ -2017,7 +2065,8 @@ static int migrate_pages_batch(struct list_head *from,
 
 	/* Batch-copy eligible folios before the move phase */
 	if (!list_empty(&unmap_batch)) {
-		rc = folios_mc_copy(&dst_batch, &unmap_batch, nr_batch);
+		rc = migrate_offload_batch_copy(&dst_batch, &unmap_batch,
+						nr_batch);
 		batch_copied = (rc == 0);
 	}
 
diff --git a/mm/migrate_copy_offload.c b/mm/migrate_copy_offload.c
new file mode 100644
index 000000000000..6f837c725239
--- /dev/null
+++ b/mm/migrate_copy_offload.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/jump_label.h>
+#include <linux/module.h>
+#include <linux/srcu.h>
+#include <linux/migrate.h>
+#include <linux/migrate_copy_offload.h>
+#include <linux/static_call.h>
+
+static DEFINE_MUTEX(migrator_mutex);
+static struct migrator *active_migrator;
+
+DECLARE_STATIC_CALL(migrate_offload_copy, folios_mc_copy);
+
+/**
+ * migrate_offload_register - register a batch-copy provider for page migration.
+ * @m: migrator to install.
+ *
+ * Only one provider can be active at a time; returns -EBUSY if another
+ * migrator is already registered.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int migrate_offload_register(struct migrator *m)
+{
+	int ret = 0;
+
+	if (!m || !m->offload_copy || !m->owner)
+		return -EINVAL;
+
+	mutex_lock(&migrator_mutex);
+	if (active_migrator) {
+		ret = -EBUSY;
+		goto unlock;
+	}
+
+	if (!try_module_get(m->owner)) {
+		ret = -ENODEV;
+		goto unlock;
+	}
+
+	static_call_update(migrate_offload_copy, m->offload_copy);
+	active_migrator = m;
+	static_branch_enable(&migrate_offload_enabled);
+
+unlock:
+	mutex_unlock(&migrator_mutex);
+
+	if (ret)
+		pr_err("migrate_offload: %s: failed to register (%d)\n",
+		       m->name, ret);
+	else
+		pr_info("migrate_offload: enabled by %s\n", m->name);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(migrate_offload_register);
+
+/**
+ * migrate_offload_unregister - unregister the active batch-copy provider.
+ * @m: migrator to remove (must be the currently active one).
+ *
+ * Reverts the static_call target and waits for an SRCU grace period so
+ * that no in-flight migration is still calling the driver's functions
+ * before the module reference is released.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int migrate_offload_unregister(struct migrator *m)
+{
+	struct module *owner;
+
+	mutex_lock(&migrator_mutex);
+	if (active_migrator != m) {
+		mutex_unlock(&migrator_mutex);
+		return -EINVAL;
+	}
+
+	/*
+	 * Disable the static branch first so that new migrate_pages_batch()
+	 * calls won't enter the batch-copy path.
+	 */
+	static_branch_disable(&migrate_offload_enabled);
+	static_call_update(migrate_offload_copy, folios_mc_copy);
+	owner = active_migrator->owner;
+	active_migrator = NULL;
+	mutex_unlock(&migrator_mutex);
+
+	/* Wait for all in-flight callers to finish before module_put(). */
+	synchronize_srcu(&migrate_offload_srcu);
+	module_put(owner);
+
+	pr_info("migrate_offload: disabled by %s\n", m->name);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(migrate_offload_unregister);
-- 
2.43.0



Thread overview: 32+ messages
2026-04-28 15:50 [PATCH 0/7] Accelerate page migration with batch copying and hardware offload Shivank Garg
2026-04-28 15:50 ` [PATCH 1/7] mm/migrate: rename PAGE_ migration flags to FOLIO_ Shivank Garg
2026-04-30  9:07   ` Huang, Ying
2026-04-28 15:50 ` [PATCH 2/7] mm/migrate: use migrate_info field instead of private Shivank Garg
2026-05-07  9:43   ` Huang, Ying
2026-05-11 15:22   ` David Hildenbrand (Arm)
2026-04-28 15:50 ` [PATCH 3/7] mm/migrate: skip data copy for already-copied folios Shivank Garg
2026-05-11 15:35   ` David Hildenbrand (Arm)
2026-04-28 15:50 ` [PATCH 4/7] mm/migrate: add batch-copy path in migrate_pages_batch Shivank Garg
2026-05-11 15:40   ` David Hildenbrand (Arm)
2026-04-28 15:50 ` Shivank Garg [this message]
2026-05-11 15:46   ` [PATCH 5/7] mm/migrate: add copy offload registration infrastructure David Hildenbrand (Arm)
2026-05-11 15:50   ` David Hildenbrand (Arm)
2026-04-28 15:50 ` [PATCH 6/7] drivers/migrate_offload: add DMA batch copy driver (dcbm) Shivank Garg
2026-04-28 15:50 ` [PATCH 7/7] mm/migrate: adjust NR_MAX_BATCHED_MIGRATION for testing Shivank Garg
2026-04-28 17:11 ` [PATCH 0/7] Accelerate page migration with batch copying and hardware offload Garg, Shivank
2026-04-28 19:33   ` David Hildenbrand (Arm)
2026-04-29  5:51     ` Garg, Shivank
2026-04-30  8:47 ` Huang, Ying
2026-05-08 11:04   ` Garg, Shivank
2026-05-08 11:28     ` Huang, Ying
2026-05-08 12:34       ` Garg, Shivank
2026-05-09  7:49         ` Huang, Ying
2026-05-10 15:03           ` Garg, Shivank
2026-05-12  2:15             ` Huang, Ying
2026-05-07  9:58 ` Huang, Ying
2026-05-11 15:19   ` David Hildenbrand (Arm)
2026-05-12  1:45     ` Huang, Ying
2026-05-11 15:53 ` David Hildenbrand (Arm)
2026-05-12  2:35   ` Huang, Ying
2026-05-12  6:34     ` David Hildenbrand (Arm)
2026-05-14  6:42       ` Huang, Ying
