From: "Huang, Ying" <ying.huang@linux.alibaba.com>
To: Shivank Garg <shivankg@amd.com>
Cc: <akpm@linux-foundation.org>, <david@kernel.org>,
	<lorenzo.stoakes@oracle.com>, <Liam.Howlett@oracle.com>,
	<vbabka@kernel.org>, <willy@infradead.org>, <rppt@kernel.org>,
	<surenb@google.com>, <mhocko@suse.com>, <ziy@nvidia.com>,
	<matthew.brost@intel.com>, <joshua.hahnjy@gmail.com>,
	<rakie.kim@sk.com>, <byungchul@sk.com>, <gourry@gourry.net>,
	<apopple@nvidia.com>, <dave@stgolabs.net>,
	<Jonathan.Cameron@huawei.com>, <rkodsara@amd.com>,
	<vkoul@kernel.org>, <bharata@amd.com>, <sj@kernel.org>,
	<weixugc@google.com>, <dan.j.williams@intel.com>,
	<rientjes@google.com>, <xuezhengchu@huawei.com>,
	<yiannis@zptcorp.com>, <dave.hansen@intel.com>,
	<hannes@cmpxchg.org>, <jhubbard@nvidia.com>,
	<peterx@redhat.com>, <riel@surriel.com>,
	<shakeel.butt@linux.dev>, <stalexan@redhat.com>,
	<tj@kernel.org>, <nifan.cxl@gmail.com>,
	<linux-kernel@vger.kernel.org>, <linux-mm@kvack.org>
Subject: Re: [RFC PATCH v4 2/6] mm/migrate: skip data copy for already-copied folios
Date: Tue, 24 Mar 2026 16:22:35 +0800
Message-ID: <87wlz1zlf8.fsf@DESKTOP-5N7EMDA>
In-Reply-To: <20260309120725.308854-8-shivankg@amd.com> (Shivank Garg's message of "Mon, 9 Mar 2026 12:07:25 +0000")

Shivank Garg <shivankg@amd.com> writes:

> Add a PAGE_ALREADY_COPIED flag to the dst->private migration state.
> When set, __migrate_folio() skips folio_mc_copy() and performs
> metadata-only migration. All callers currently pass
> already_copied=false. The batch-copy path enables it in a later patch.
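
If I read the series correctly, the batch-copy path added later will
end up doing roughly the following before moving each folio (my
sketch, not code from this patch):

	/* data was already moved in bulk by folios_mc_copy() */
	rc = migrate_folio_move(put_new_folio, private, folio, dst,
				mode, reason, ret_folios, true);

so that __migrate_folio() sees PAGE_ALREADY_COPIED set in dst->private
and skips folio_mc_copy().
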
>
> Move the dst->private state enum earlier in the file so
> __migrate_folio() and move_to_new_folio() can see PAGE_ALREADY_COPIED.
>
> Signed-off-by: Shivank Garg <shivankg@amd.com>
> ---
>  mm/migrate.c | 52 +++++++++++++++++++++++++++++++---------------------
>  1 file changed, 31 insertions(+), 21 deletions(-)
>
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 1bf2cf8c44dd..1d8c1fb627c9 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -848,6 +848,18 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
>  }
>  EXPORT_SYMBOL(folio_migrate_flags);
>  
> +/*
> + * To record some information during migration, we use unused private
> + * field of struct folio of the newly allocated destination folio.
> + * This is safe because nobody is using it except us.
> + */
> +enum {
> +	PAGE_WAS_MAPPED = BIT(0),
> +	PAGE_WAS_MLOCKED = BIT(1),
> +	PAGE_ALREADY_COPIED = BIT(2),
> +	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED | PAGE_ALREADY_COPIED,
> +};
> +
>  /************************************************************
>   *                    Migration functions
>   ***********************************************************/
> @@ -857,14 +869,20 @@ static int __migrate_folio(struct address_space *mapping, struct folio *dst,
>  			   enum migrate_mode mode)
>  {
>  	int rc, expected_count = folio_expected_ref_count(src) + 1;
> +	bool already_copied = ((unsigned long)dst->private & PAGE_ALREADY_COPIED);
> +
> +	if (already_copied)
> +		dst->private = NULL;
>  
>  	/* Check whether src does not have extra refs before we do more work */
>  	if (folio_ref_count(src) != expected_count)
>  		return -EAGAIN;
>  
> -	rc = folio_mc_copy(dst, src);
> -	if (unlikely(rc))
> -		return rc;
> +	if (!already_copied) {
> +		rc = folio_mc_copy(dst, src);
> +		if (unlikely(rc))
> +			return rc;
> +	}
>  
>  	rc = __folio_migrate_mapping(mapping, dst, src, expected_count);
>  	if (rc)
> @@ -1088,7 +1106,7 @@ static int fallback_migrate_folio(struct address_space *mapping,
>   *     0 - success
>   */
>  static int move_to_new_folio(struct folio *dst, struct folio *src,
> -				enum migrate_mode mode)
> +		enum migrate_mode mode, bool already_copied)
>  {
>  	struct address_space *mapping = folio_mapping(src);
>  	int rc = -EAGAIN;
> @@ -1096,6 +1114,9 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
>  	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
>  	VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
>  
> +	if (already_copied)
> +		dst->private = (void *)(unsigned long)PAGE_ALREADY_COPIED;
> +

IMHO, smuggling an argument to a function through dst->private is
unusual.  Why not adjust the parameters of migrate_folio() instead?
For example, how about turning enum migrate_mode into a bitmask
(migrate_flags)?
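
Something along these lines, perhaps (just a sketch to illustrate the
idea; the macro and parameter names are made up):

/*
 * Sketch only: keep the existing migrate_mode values in the low bits
 * and add boolean flags above them, so a single word replaces the
 * (mode, already_copied) pair.
 */
#define MIGRATE_MODE_MASK	GENMASK(1, 0)	/* ASYNC/SYNC_LIGHT/SYNC */
#define MIGRATE_ALREADY_COPIED	BIT(2)

static int __migrate_folio(struct address_space *mapping,
			   struct folio *dst, struct folio *src,
			   unsigned int migrate_flags)
{
	int rc, expected_count = folio_expected_ref_count(src) + 1;

	/* callers needing the mode use migrate_flags & MIGRATE_MODE_MASK */

	/* Check whether src does not have extra refs before we do more work */
	if (folio_ref_count(src) != expected_count)
		return -EAGAIN;

	if (!(migrate_flags & MIGRATE_ALREADY_COPIED)) {
		rc = folio_mc_copy(dst, src);
		if (unlikely(rc))
			return rc;
	}
	...
}

Then the batch-copy path only needs to OR MIGRATE_ALREADY_COPIED in at
the call site, and dst->private stays free for the old-page-state bits.
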

>  	if (!mapping)
>  		rc = migrate_folio(mapping, dst, src, mode);
>  	else if (mapping_inaccessible(mapping))
> @@ -1127,17 +1148,6 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
>  	return rc;
>  }
>  
> -/*
> - * To record some information during migration, we use unused private
> - * field of struct folio of the newly allocated destination folio.
> - * This is safe because nobody is using it except us.
> - */
> -enum {
> -	PAGE_WAS_MAPPED = BIT(0),
> -	PAGE_WAS_MLOCKED = BIT(1),
> -	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
> -};
> -
>  static void __migrate_folio_record(struct folio *dst,
>  				   int old_page_state,
>  				   struct anon_vma *anon_vma)
> @@ -1353,7 +1363,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
>  static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
>  			      struct folio *src, struct folio *dst,
>  			      enum migrate_mode mode, enum migrate_reason reason,
> -			      struct list_head *ret)
> +			      struct list_head *ret, bool already_copied)
>  {
>  	int rc;
>  	int old_page_state = 0;
> @@ -1371,7 +1381,7 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
>  		goto out_unlock_both;
>  	}
>  
> -	rc = move_to_new_folio(dst, src, mode);
> +	rc = move_to_new_folio(dst, src, mode, already_copied);
>  	if (rc)
>  		goto out;
>  
> @@ -1519,7 +1529,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
>  	}
>  
>  	if (!folio_mapped(src))
> -		rc = move_to_new_folio(dst, src, mode);
> +		rc = move_to_new_folio(dst, src, mode, false);
>  
>  	if (page_was_mapped)
>  		remove_migration_ptes(src, !rc ? dst : src, ttu);
> @@ -1703,7 +1713,7 @@ static void migrate_folios_move(struct list_head *src_folios,
>  		struct list_head *ret_folios,
>  		struct migrate_pages_stats *stats,
>  		int *retry, int *thp_retry, int *nr_failed,
> -		int *nr_retry_pages)
> +		int *nr_retry_pages, bool already_copied)
>  {
>  	struct folio *folio, *folio2, *dst, *dst2;
>  	bool is_thp;
> @@ -1720,7 +1730,7 @@ static void migrate_folios_move(struct list_head *src_folios,
>  
>  		rc = migrate_folio_move(put_new_folio, private,
>  				folio, dst, mode,
> -				reason, ret_folios);
> +				reason, ret_folios, already_copied);
>  		/*
>  		 * The rules are:
>  		 *	0: folio will be freed
> @@ -1977,7 +1987,7 @@ static int migrate_pages_batch(struct list_head *from,
>  		migrate_folios_move(&unmap_folios, &dst_folios,
>  				put_new_folio, private, mode, reason,
>  				ret_folios, stats, &retry, &thp_retry,
> -				&nr_failed, &nr_retry_pages);
> +				&nr_failed, &nr_retry_pages, false);
>  	}
>  	nr_failed += retry;
>  	stats->nr_thp_failed += thp_retry;

---
Best Regards,
Huang, Ying

