The Linux Kernel Mailing List
From: "David Hildenbrand (Arm)" <david@kernel.org>
To: kasong@tencent.com, linux-mm@kvack.org
Cc: Andrew Morton <akpm@linux-foundation.org>,
	Zi Yan <ziy@nvidia.com>,
	Baolin Wang <baolin.wang@linux.alibaba.com>,
	Barry Song <baohua@kernel.org>, Hugh Dickins <hughd@google.com>,
	Chris Li <chrisl@kernel.org>,
	Kemeng Shi <shikemeng@huaweicloud.com>,
	Nhat Pham <nphamcs@gmail.com>, Baoquan He <bhe@redhat.com>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Youngjun Park <youngjun.park@lge.com>,
	Chengming Zhou <chengming.zhou@linux.dev>,
	Roman Gushchin <roman.gushchin@linux.dev>,
	Shakeel Butt <shakeel.butt@linux.dev>,
	Muchun Song <muchun.song@linux.dev>,
	Qi Zheng <zhengqi.arch@bytedance.com>,
	linux-kernel@vger.kernel.org, cgroups@vger.kernel.org,
	Yosry Ahmed <yosry@kernel.org>, Lorenzo Stoakes <ljs@kernel.org>,
	Dev Jain <dev.jain@arm.com>, Lance Yang <lance.yang@linux.dev>,
	Michal Hocko <mhocko@suse.com>, Michal Hocko <mhocko@kernel.org>,
	Suren Baghdasaryan <surenb@google.com>,
	Axel Rasmussen <axelrasmussen@google.com>
Subject: Re: [PATCH v3 05/12] mm, swap: unify large folio allocation
Date: Mon, 11 May 2026 14:57:41 +0200
Message-ID: <675e9027-9fb5-47b5-9a2d-c9a416a27d0d@kernel.org>
In-Reply-To: <20260421-swap-table-p4-v3-5-2f23759a76bc@tencent.com>

On 4/21/26 08:16, Kairui Song via B4 Relay wrote:
> From: Kairui Song <kasong@tencent.com>
> 
> Now that direct large order allocation is supported in the swap cache,
> both anon and shmem can use it instead of implementing their own methods.
> This unifies the fallback and swap cache check, which also reduces the
> TOCTOU race window of swap cache state: previously, high order swapin
> required checking swap cache states first, then allocating and falling
> back separately. Now all these steps happen in the same compact loop.
> 
> Order fallback and statistics are also unified, callers just need to
> check and pass the acceptable order bitmask.
> 
> There is basically no behavior change. This only makes things more
> unified and prepares for later commits. Cgroup and zero map checks can
> also be moved into the compact loop, further reducing race windows and
> redundancy.
> 
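Just to confirm my reading of the new flow: cache lookup, allocation, charge
and order fallback now all live in a single loop, roughly like the following
sketch (hand-written; swap_cache_alloc_or_get() is a made-up stand-in for the
helper added earlier in the series, not its actual name):

	static struct folio *swapin_entry_sketch(swp_entry_t entry, gfp_t gfp,
						 unsigned long orders)
	{
		int order = highest_order(orders);
		struct folio *folio;

		while (orders) {
			/*
			 * Look up the swap cache and, on miss, allocate and
			 * charge a folio of @order in one step, so a racing
			 * swapin is caught here rather than via a separate
			 * up-front check.
			 */
			folio = swap_cache_alloc_or_get(entry, gfp, order);
			if (folio)
				return folio;
			/* Allocation or charge failed, try a lower order. */
			count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK);
			order = next_order(&orders, order);
		}
		return NULL;
	}

If that is the intended shape, spelling it out in the changelog would help.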

You should spell out the rename from swapin_folio() to swapin_entry() [and why
it is done].

swapin_readahead() vs. swapin_entry() looks a bit odd, given that both consume
an entry.
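In the new do_swap_page() below, the two calls end up side by side:

	folio = swapin_entry(entry, GFP_HIGHUSER_MOVABLE,
			     thp_swapin_suitable_orders(vmf), vmf, NULL, 0);
	folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vmf);

and the names don't tell the reader what actually distinguishes them (direct
vs. readahead-based swapin). Maybe something like swapin_direct() would be
clearer, but no strong opinion.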

> Signed-off-by: Kairui Song <kasong@tencent.com>
> ---
>  mm/memory.c     |  77 ++++++------------------------
>  mm/shmem.c      |  94 +++++++++---------------------------
>  mm/swap.h       |  30 ++----------
>  mm/swap_state.c | 145 ++++++++++----------------------------------------------
>  mm/swapfile.c   |   3 +-
>  5 files changed, 67 insertions(+), 282 deletions(-)
> 
> diff --git a/mm/memory.c b/mm/memory.c
> index ea6568571131..404734a5bcff 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -4593,26 +4593,6 @@ static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
>  	return VM_FAULT_SIGBUS;
>  }
>  
> -static struct folio *__alloc_swap_folio(struct vm_fault *vmf)
> -{
> -	struct vm_area_struct *vma = vmf->vma;
> -	struct folio *folio;
> -	softleaf_t entry;
> -
> -	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address);
> -	if (!folio)
> -		return NULL;
> -
> -	entry = softleaf_from_pte(vmf->orig_pte);
> -	if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
> -					   GFP_KERNEL, entry)) {
> -		folio_put(folio);
> -		return NULL;
> -	}
> -
> -	return folio;
> -}
> -
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>  /*
>   * Check if the PTEs within a range are contiguous swap entries
> @@ -4642,8 +4622,6 @@ static bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages)
>  	 */
>  	if (unlikely(swap_zeromap_batch(entry, nr_pages, NULL) != nr_pages))
>  		return false;
> -	if (unlikely(non_swapcache_batch(entry, nr_pages) != nr_pages))
> -		return false;
>  

This should also be pointed out in the patch description (and why it is OK).
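My assumption for why it is OK (please correct me): the dropped pre-check

	if (unlikely(non_swapcache_batch(entry, nr_pages) != nr_pages))
		return false;

only narrowed a race window up front, and with this series the swap cache
state is (re)checked inside the allocation loop anyway, so the pre-check
became redundant. If so, that reasoning belongs in the changelog.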

>  	return true;
>  }
> @@ -4671,16 +4649,14 @@ static inline unsigned long thp_swap_suitable_orders(pgoff_t swp_offset,
>  	return orders;
>  }
>  
> -static struct folio *alloc_swap_folio(struct vm_fault *vmf)
> +static unsigned long thp_swapin_suitable_orders(struct vm_fault *vmf)
>  {
>  	struct vm_area_struct *vma = vmf->vma;
>  	unsigned long orders;
> -	struct folio *folio;
>  	unsigned long addr;
>  	softleaf_t entry;
>  	spinlock_t *ptl;
>  	pte_t *pte;
> -	gfp_t gfp;
>  	int order;
>  
>  	/*
> @@ -4688,7 +4664,7 @@ static struct folio *alloc_swap_folio(struct vm_fault *vmf)
>  	 * maintain the uffd semantics.
>  	 */
>  	if (unlikely(userfaultfd_armed(vma)))
> -		goto fallback;
> +		return 0;
>  
>  	/*
>  	 * A large swapped out folio could be partially or fully in zswap. We
> @@ -4696,7 +4672,7 @@ static struct folio *alloc_swap_folio(struct vm_fault *vmf)
>  	 * folio.
>  	 */
>  	if (!zswap_never_enabled())
> -		goto fallback;
> +		return 0;
>  
>  	entry = softleaf_from_pte(vmf->orig_pte);
>  	/*
> @@ -4710,12 +4686,12 @@ static struct folio *alloc_swap_folio(struct vm_fault *vmf)
>  					  vmf->address, orders);
>  
>  	if (!orders)
> -		goto fallback;
> +		return 0;
>  
>  	pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
>  				  vmf->address & PMD_MASK, &ptl);
>  	if (unlikely(!pte))
> -		goto fallback;
> +		return 0;
>  
>  	/*
>  	 * For do_swap_page, find the highest order where the aligned range is
> @@ -4731,29 +4707,12 @@ static struct folio *alloc_swap_folio(struct vm_fault *vmf)
>  
>  	pte_unmap_unlock(pte, ptl);
>  
> -	/* Try allocating the highest of the remaining orders. */
> -	gfp = vma_thp_gfp_mask(vma);
> -	while (orders) {
> -		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
> -		folio = vma_alloc_folio(gfp, order, vma, addr);
> -		if (folio) {
> -			if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
> -							    gfp, entry))
> -				return folio;
> -			count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK_CHARGE);
> -			folio_put(folio);
> -		}
> -		count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK);
> -		order = next_order(&orders, order);
> -	}
> -
> -fallback:
> -	return __alloc_swap_folio(vmf);
> +	return orders;
>  }
>  #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
> -static struct folio *alloc_swap_folio(struct vm_fault *vmf)
> +static unsigned long thp_swapin_suitable_orders(struct vm_fault *vmf)
>  {
> -	return __alloc_swap_folio(vmf);
> +	return 0;
>  }
>  #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
>  
> @@ -4859,21 +4818,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>  	if (folio)
>  		swap_update_readahead(folio, vma, vmf->address);
>  	if (!folio) {
> -		if (data_race(si->flags & SWP_SYNCHRONOUS_IO)) {
> -			folio = alloc_swap_folio(vmf);
> -			if (folio) {
> -				/*
> -				 * folio is charged, so swapin can only fail due
> -				 * to raced swapin and return NULL.
> -				 */
> -				swapcache = swapin_folio(entry, folio);
> -				if (swapcache != folio)
> -					folio_put(folio);
> -				folio = swapcache;
> -			}
> -		} else {
> +		/* Swapin bypasses readahead for SWP_SYNCHRONOUS_IO devices */
> +		if (data_race(si->flags & SWP_SYNCHRONOUS_IO))
> +			folio = swapin_entry(entry, GFP_HIGHUSER_MOVABLE,
> +					     thp_swapin_suitable_orders(vmf),
> +					     vmf, NULL, 0);
> +		else
>  			folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vmf);
> -		}
>  
>  		if (!folio) {
>  			/*

Nothing else jumped out at me in memory.c.

-- 
Cheers,

David

