All of lore.kernel.org
 help / color / mirror / Atom feed
From: Alexander Gordeev <agordeev@linux.ibm.com>
To: Minchan Kim <minchan@kernel.org>
Cc: akpm@linux-foundation.org, hca@linux.ibm.com,
	linux-s390@vger.kernel.org, david@kernel.org, mhocko@suse.com,
	brauner@kernel.org, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org, surenb@google.com,
	timmurray@google.com, Minchan Kim <minchan@google.com>
Subject: Re: [PATCH v1 1/3] mm: process_mrelease: expedite clean file folio reclaim via mmu_gather
Date: Tue, 5 May 2026 16:53:18 +0200	[thread overview]
Message-ID: <20260505145318.97692A14-agordeev@linux.ibm.com> (raw)
In-Reply-To: <20260421230239.172582-2-minchan@kernel.org>

On Tue, Apr 21, 2026 at 04:02:37PM -0700, Minchan Kim wrote:

Hi Minchan,

> Currently, process_mrelease() unmaps pages but file-backed pages are
> not evicted and stay in the pagecache, relying on standard memory reclaim
> (kswapd or direct reclaim) to eventually free them. This delays the
> immediate recovery of system memory under Android's LMKD scenarios,
> leading to redundant background app kills.
> 
> This patch implements an expedited eviction mechanism for clean pagecache
> folios in the mmu_gather code, similar to how swapcache folios are handled.
> It drops them from the pagecache (i.e., evicts them) if they are completely
> unmapped during reaping.
> 
> Within this single unified loop, anonymous pages are released via
> free_swap_cache(), and file-backed folios are symmetrically released via
> free_file_cache().
> 
> Signed-off-by: Minchan Kim <minchan@kernel.org>
> ---
>  arch/s390/include/asm/tlb.h |  2 +-
>  include/linux/swap.h        |  5 ++---
>  mm/mmu_gather.c             |  7 ++++---
>  mm/swap.c                   | 42 +++++++++++++++++++++++++++++++++++++
>  mm/swap_state.c             | 26 -----------------------
>  5 files changed, 49 insertions(+), 33 deletions(-)
> 
> diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
> index 619fd41e710e..2736dbb571a8 100644
> --- a/arch/s390/include/asm/tlb.h
> +++ b/arch/s390/include/asm/tlb.h
> @@ -62,7 +62,7 @@ static inline bool __tlb_remove_folio_pages(struct mmu_gather *tlb,
>  	VM_WARN_ON_ONCE(delay_rmap);
>  	VM_WARN_ON_ONCE(page_folio(page) != page_folio(page + nr_pages - 1));
>  
> -	free_pages_and_swap_cache(encoded_pages, ARRAY_SIZE(encoded_pages));
> +	free_pages_and_caches(tlb->mm, encoded_pages, ARRAY_SIZE(encoded_pages));
>  	return false;
>  }
>  
> diff --git a/include/linux/swap.h b/include/linux/swap.h
> index 62fc7499b408..bdb784966343 100644
> --- a/include/linux/swap.h
> +++ b/include/linux/swap.h
> @@ -414,7 +414,9 @@ extern int sysctl_min_unmapped_ratio;
>  extern int sysctl_min_slab_ratio;
>  #endif
>  
> +struct mm_struct;
>  void check_move_unevictable_folios(struct folio_batch *fbatch);
> +void free_pages_and_caches(struct mm_struct *mm, struct encoded_page **pages, int nr);
>  
>  extern void __meminit kswapd_run(int nid);
>  extern void __meminit kswapd_stop(int nid);
> @@ -433,7 +435,6 @@ static inline unsigned long total_swapcache_pages(void)
>  
>  void free_swap_cache(struct folio *folio);
>  void free_folio_and_swap_cache(struct folio *folio);
> -void free_pages_and_swap_cache(struct encoded_page **, int);
>  /* linux/mm/swapfile.c */
>  extern atomic_long_t nr_swap_pages;
>  extern long total_swap_pages;
> @@ -510,8 +511,6 @@ static inline void put_swap_device(struct swap_info_struct *si)
>  	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
>  #define free_folio_and_swap_cache(folio) \
>  	folio_put(folio)
> -#define free_pages_and_swap_cache(pages, nr) \
> -	release_pages((pages), (nr));
>  
>  static inline void free_swap_cache(struct folio *folio)
>  {
> diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
> index fe5b6a031717..3c6c315d3c48 100644
> --- a/mm/mmu_gather.c
> +++ b/mm/mmu_gather.c
> @@ -100,7 +100,8 @@ void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma)
>   */
>  #define MAX_NR_FOLIOS_PER_FREE		512
>  
> -static void __tlb_batch_free_encoded_pages(struct mmu_gather_batch *batch)
> +static void __tlb_batch_free_encoded_pages(struct mm_struct *mm,
> +		struct mmu_gather_batch *batch)
>  {
>  	struct encoded_page **pages = batch->encoded_pages;
>  	unsigned int nr, nr_pages;
> @@ -135,7 +136,7 @@ static void __tlb_batch_free_encoded_pages(struct mmu_gather_batch *batch)
>  			}
>  		}
>  
> -		free_pages_and_swap_cache(pages, nr);
> +		free_pages_and_caches(mm, pages, nr);
>  		pages += nr;
>  		batch->nr -= nr;
>  
> @@ -148,7 +149,7 @@ static void tlb_batch_pages_flush(struct mmu_gather *tlb)
>  	struct mmu_gather_batch *batch;
>  
>  	for (batch = &tlb->local; batch && batch->nr; batch = batch->next)
> -		__tlb_batch_free_encoded_pages(batch);
> +		__tlb_batch_free_encoded_pages(tlb->mm, batch);
>  	tlb->active = &tlb->local;
>  }
>  
> diff --git a/mm/swap.c b/mm/swap.c
> index bb19ccbece46..e44bc8cefceb 100644
> --- a/mm/swap.c
> +++ b/mm/swap.c
> @@ -1043,6 +1043,48 @@ void release_pages(release_pages_arg arg, int nr)
>  }
>  EXPORT_SYMBOL(release_pages);
>  
> +static inline void free_file_cache(struct folio *folio)
> +{
> +	if (folio_trylock(folio)) {
> +		mapping_evict_folio(folio_mapping(folio), folio);
> +		folio_unlock(folio);
> +	}
> +}
> +
> +/*
> + * Passed an array of pages, drop them all from swapcache and then release
> + * them.  They are removed from the LRU and freed if this is their last use.
> + *
> + * If @try_evict_file_folios is true, this function will proactively evict clean
> + * file-backed folios if they are no longer mapped.
> + */
> +void free_pages_and_caches(struct mm_struct *mm, struct encoded_page **pages, int nr)
> +{
> +	bool try_evict_file_folios = mm_flags_test(MMF_UNSTABLE, mm);
> +	struct folio_batch folios;
> +	unsigned int refs[PAGEVEC_SIZE];
> +
> +	folio_batch_init(&folios);
> +	for (int i = 0; i < nr; i++) {
> +		struct folio *folio = page_folio(encoded_page_ptr(pages[i]));
> +
> +		if (folio_test_anon(folio))
> +			free_swap_cache(folio);
> +		else if (unlikely(try_evict_file_folios))
> +			free_file_cache(folio);

This condition is absent in free_pages_and_swap_cache().
What would happen with a folio that is neither anon nor eligible for eviction?

> +
> +		refs[folios.nr] = 1;
> +		if (unlikely(encoded_page_flags(pages[i]) &
> +			     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
> +			refs[folios.nr] = encoded_nr_pages(pages[++i]);
> +
> +		if (folio_batch_add(&folios, folio) == 0)
> +			folios_put_refs(&folios, refs);
> +	}
> +	if (folios.nr)
> +		folios_put_refs(&folios, refs);
> +}
> +
>  /*
>   * The folios which we're about to release may be in the deferred lru-addition
>   * queues.  That would prevent them from really being freed right now.  That's
> diff --git a/mm/swap_state.c b/mm/swap_state.c
> index 6d0eef7470be..7576bf36d920 100644
> --- a/mm/swap_state.c
> +++ b/mm/swap_state.c
> @@ -400,32 +400,6 @@ void free_folio_and_swap_cache(struct folio *folio)
>  		folio_put(folio);
>  }
>  
> -/*
> - * Passed an array of pages, drop them all from swapcache and then release
> - * them.  They are removed from the LRU and freed if this is their last use.
> - */
> -void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
> -{
> -	struct folio_batch folios;
> -	unsigned int refs[PAGEVEC_SIZE];
> -
> -	folio_batch_init(&folios);
> -	for (int i = 0; i < nr; i++) {
> -		struct folio *folio = page_folio(encoded_page_ptr(pages[i]));
> -
> -		free_swap_cache(folio);
> -		refs[folios.nr] = 1;
> -		if (unlikely(encoded_page_flags(pages[i]) &
> -			     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
> -			refs[folios.nr] = encoded_nr_pages(pages[++i]);
> -
> -		if (folio_batch_add(&folios, folio) == 0)
> -			folios_put_refs(&folios, refs);
> -	}
> -	if (folios.nr)
> -		folios_put_refs(&folios, refs);
> -}
> -
>  static inline bool swap_use_vma_readahead(void)
>  {
>  	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
> -- 
> 2.54.0.rc1.555.g9c883467ad-goog
> 
> 


  parent reply	other threads:[~2026-05-05 14:53 UTC|newest]

Thread overview: 51+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-04-21 23:02 [PATCH v1 0/3] mm: process_mrelease: expedite clean file folio reclaim and add auto-kill Minchan Kim
2026-04-21 23:02 ` [PATCH v1 1/3] mm: process_mrelease: expedite clean file folio reclaim via mmu_gather Minchan Kim
2026-04-24  7:56   ` David Hildenbrand (Arm)
2026-04-24 21:24     ` Minchan Kim
2026-04-27  9:29       ` David Hildenbrand (Arm)
2026-04-27 22:04         ` Minchan Kim
2026-04-24 19:33   ` Matthew Wilcox
2026-04-24 21:56     ` Minchan Kim
2026-05-05 14:53   ` Alexander Gordeev [this message]
2026-05-08 21:56     ` Minchan Kim
2026-05-11 16:13       ` Alexander Gordeev
2026-05-11 21:48         ` Minchan Kim
2026-04-21 23:02 ` [PATCH v1 2/3] mm: process_mrelease: skip LRU movement for exclusive file folios Minchan Kim
2026-04-22  7:22   ` Baolin Wang
2026-04-23 23:38     ` Minchan Kim
2026-04-24  7:51   ` Michal Hocko
2026-04-24  7:57     ` David Hildenbrand (Arm)
2026-04-24 19:15       ` Minchan Kim
2026-04-27  7:16         ` Michal Hocko
2026-04-27 16:48           ` Suren Baghdasaryan
2026-04-27 17:15             ` Michal Hocko
2026-04-27 23:05               ` Minchan Kim
2026-04-28  6:56                 ` Michal Hocko
2026-04-29  1:19                   ` Minchan Kim
2026-04-29  8:18                     ` Michal Hocko
2026-04-29  9:09                       ` David Hildenbrand (Arm)
2026-04-29 10:33                         ` Michal Hocko
2026-04-29 13:07                           ` David Hildenbrand (Arm)
2026-04-29 14:44                             ` Michal Hocko
2026-04-30  6:08                               ` David Hildenbrand (Arm)
2026-05-08 20:57                                 ` Liam R. Howlett
2026-05-11 13:05                                   ` David Hildenbrand (Arm)
2026-05-13  6:47                                   ` Michal Hocko
2026-04-29 21:41                         ` Minchan Kim
2026-04-30 14:38                           ` David Hildenbrand (Arm)
2026-04-29  8:55                     ` David Hildenbrand (Arm)
2026-04-29 21:42                       ` Minchan Kim
2026-04-24 19:26     ` Minchan Kim
2026-04-21 23:02 ` [PATCH v1 3/3] mm: process_mrelease: introduce PROCESS_MRELEASE_REAP_KILL flag Minchan Kim
2026-04-24  7:57   ` Michal Hocko
2026-04-24 22:49     ` Minchan Kim
2026-04-27  7:02       ` Michal Hocko
2026-04-27 22:03         ` Minchan Kim
2026-04-28  7:01           ` Michal Hocko
2026-04-28 22:37             ` Minchan Kim
2026-04-29  8:25               ` Michal Hocko
2026-04-29 20:01                 ` Suren Baghdasaryan
2026-04-29 21:17                   ` Minchan Kim
2026-04-29 21:16                 ` Minchan Kim
2026-04-27 20:34   ` Suren Baghdasaryan
2026-04-27 22:52     ` Minchan Kim

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260505145318.97692A14-agordeev@linux.ibm.com \
    --to=agordeev@linux.ibm.com \
    --cc=akpm@linux-foundation.org \
    --cc=brauner@kernel.org \
    --cc=david@kernel.org \
    --cc=hca@linux.ibm.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=linux-s390@vger.kernel.org \
    --cc=mhocko@suse.com \
    --cc=minchan@google.com \
    --cc=minchan@kernel.org \
    --cc=surenb@google.com \
    --cc=timmurray@google.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.