From: Ryan Roberts <ryan.roberts@arm.com>
To: Baolin Wang <baolin.wang@linux.alibaba.com>,
	akpm@linux-foundation.org, hughd@google.com
Cc: willy@infradead.org, david@redhat.com, 21cnbao@gmail.com,
	ziy@nvidia.com, ioworker0@gmail.com, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org
Subject: Re: [PATCH v2 3/3] mm: shmem: move shmem_huge_global_enabled() into shmem_allowable_huge_orders()
Date: Mon, 15 Jul 2024 14:36:41 +0100	[thread overview]
Message-ID: <b670b981-bc79-4c8c-8b69-4879300066d4@arm.com> (raw)
In-Reply-To: <16dad33282617ecb90a3f466b35273b10be68d21.1720755678.git.baolin.wang@linux.alibaba.com>

On 13/07/2024 14:24, Baolin Wang wrote:
> Move shmem_huge_global_enabled() into the shmem_allowable_huge_orders() function,
> so that shmem_allowable_huge_orders() can also help to find the allowable huge
> orders for tmpfs. Moreover, shmem_huge_global_enabled() can then become static.
> 
> No functional changes.
> 
> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>

one nit below, but either way:

Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>

> ---
>  include/linux/shmem_fs.h | 12 ++----------
>  mm/huge_memory.c         | 12 +++---------
>  mm/shmem.c               | 41 ++++++++++++++++++++++++++--------------
>  3 files changed, 32 insertions(+), 33 deletions(-)
> 
> diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
> index 405ee8d3589a..1564d7d3ca61 100644
> --- a/include/linux/shmem_fs.h
> +++ b/include/linux/shmem_fs.h
> @@ -111,21 +111,13 @@ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
>  int shmem_unuse(unsigned int type);
>  
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> -extern bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index, bool shmem_huge_force,
> -				      struct mm_struct *mm, unsigned long vm_flags);
>  unsigned long shmem_allowable_huge_orders(struct inode *inode,
>  				struct vm_area_struct *vma, pgoff_t index,
> -				bool global_huge);
> +				bool shmem_huge_force);
>  #else
> -static __always_inline bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
> -						      bool shmem_huge_force, struct mm_struct *mm,
> -						      unsigned long vm_flags)
> -{
> -	return false;
> -}
>  static inline unsigned long shmem_allowable_huge_orders(struct inode *inode,
>  				struct vm_area_struct *vma, pgoff_t index,
> -				bool global_huge)
> +				bool shmem_huge_force)
>  {
>  	return 0;
>  }
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index cc9bad12be75..f69980b5b5fc 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -151,16 +151,10 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
>  	 * Must be done before hugepage flags check since shmem has its
>  	 * own flags.
>  	 */
> -	if (!in_pf && shmem_file(vma->vm_file)) {
> -		bool global_huge = shmem_huge_global_enabled(file_inode(vma->vm_file),
> -							     vma->vm_pgoff, !enforce_sysfs,
> -							     vma->vm_mm, vm_flags);
> -
> -		if (!vma_is_anon_shmem(vma))
> -			return global_huge ? orders : 0;
> +	if (!in_pf && shmem_file(vma->vm_file))
>  		return shmem_allowable_huge_orders(file_inode(vma->vm_file),
> -							vma, vma->vm_pgoff, global_huge);
> -	}
> +						   vma, vma->vm_pgoff,
> +						   !enforce_sysfs);
>  
>  	if (!vma_is_anonymous(vma)) {
>  		/*
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 1445dcd39b6f..4d274f5a17d9 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -581,7 +581,7 @@ static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
>  	}
>  }
>  
> -bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
> +static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
>  		   bool shmem_huge_force, struct mm_struct *mm,
>  		   unsigned long vm_flags)
>  {
> @@ -772,6 +772,13 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
>  {
>  	return 0;
>  }
> +
> +static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
> +		bool shmem_huge_force, struct mm_struct *mm,
> +		unsigned long vm_flags)
> +{
> +	return false;
> +}
>  #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
>  
>  /*
> @@ -1625,27 +1632,39 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>  unsigned long shmem_allowable_huge_orders(struct inode *inode,
>  				struct vm_area_struct *vma, pgoff_t index,
> -				bool global_huge)
> +				bool shmem_huge_force)
>  {
>  	unsigned long mask = READ_ONCE(huge_shmem_orders_always);
>  	unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
> -	unsigned long vm_flags = vma->vm_flags;
> +	unsigned long vm_flags = vma ? vma->vm_flags : 0;
> +	struct mm_struct *fault_mm = vma ? vma->vm_mm : NULL;

nit: rather than deriving the fault_mm here, I wonder if it's cleaner to just
pass the vma to shmem_huge_global_enabled()? shmem_huge_global_enabled() only
uses the mm as a guard for accessing vm_flags, which you can just as easily do
by testing the vma for non-NULL. And you can access the mm flags via
vma->vm_mm->flags after testing the vma too.
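Something like this is what I had in mind; an untested sketch just to
illustrate, with the bodies abbreviated (both the wrapper and
__shmem_huge_global_enabled() would take the vma instead of the
mm/vm_flags pair):

	static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
				bool shmem_huge_force, struct vm_area_struct *vma)
	{
		...
		case SHMEM_HUGE_ADVISE:
			/* Testing the vma for non-NULL replaces the old mm guard. */
			if (vma && (vma->vm_flags & VM_HUGEPAGE))
				return true;
		...
	}

and the call site in shmem_allowable_huge_orders() then doesn't need to
derive fault_mm at all:

		global_huge = shmem_huge_global_enabled(inode, index,
							shmem_huge_force, vma);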

>  	/*
>  	 * Check all the (large) orders below HPAGE_PMD_ORDER + 1 that
>  	 * are enabled for this vma.
>  	 */
>  	unsigned long orders = BIT(PMD_ORDER + 1) - 1;
> +	bool global_huge;
>  	loff_t i_size;
>  	int order;
>  
> -	if ((vm_flags & VM_NOHUGEPAGE) ||
> -	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
> +	if (vma && ((vm_flags & VM_NOHUGEPAGE) ||
> +	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
>  		return 0;
>  
>  	/* If the hardware/firmware marked hugepage support disabled. */
>  	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
>  		return 0;
>  
> +	global_huge = shmem_huge_global_enabled(inode, index, shmem_huge_force,
> +						fault_mm, vm_flags);
> +	if (!vma || !vma_is_anon_shmem(vma)) {
> +		/*
> +		 * For tmpfs, we now only support PMD sized THP if huge page
> +		 * is enabled, otherwise fallback to order 0.
> +		 */
> +		return global_huge ? BIT(HPAGE_PMD_ORDER) : 0;
> +	}
> +
>  	/*
>  	 * Following the 'deny' semantics of the top level, force the huge
>  	 * option off from all mounts.
> @@ -2081,7 +2100,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
>  	struct mm_struct *fault_mm;
>  	struct folio *folio;
>  	int error;
> -	bool alloced, huge;
> +	bool alloced;
>  	unsigned long orders = 0;
>  
>  	if (WARN_ON_ONCE(!shmem_mapping(inode->i_mapping)))
> @@ -2154,14 +2173,8 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
>  		return 0;
>  	}
>  
> -	huge = shmem_huge_global_enabled(inode, index, false, fault_mm,
> -			     vma ? vma->vm_flags : 0);
> -	/* Find hugepage orders that are allowed for anonymous shmem. */
> -	if (vma && vma_is_anon_shmem(vma))
> -		orders = shmem_allowable_huge_orders(inode, vma, index, huge);
> -	else if (huge)
> -		orders = BIT(HPAGE_PMD_ORDER);
> -
> +	/* Find hugepage orders that are allowed for anonymous shmem and tmpfs. */
> +	orders = shmem_allowable_huge_orders(inode, vma, index, false);
>  	if (orders > 0) {
>  		gfp_t huge_gfp;
>  




Thread overview: 14+ messages
2024-07-13 13:24 [PATCH v2 0/3] Some cleanups for shmem Baolin Wang
2024-07-13 13:24 ` [PATCH v2 1/3] mm: shmem: simplify the suitable huge orders validation for tmpfs Baolin Wang
2024-07-15 13:30   ` Ryan Roberts
2024-07-25 13:07   ` David Hildenbrand
2024-07-13 13:24 ` [PATCH v2 2/3] mm: shmem: rename shmem_is_huge() to shmem_huge_global_enabled() Baolin Wang
2024-07-15 13:32   ` Ryan Roberts
2024-07-25 13:08   ` David Hildenbrand
2024-07-13 13:24 ` [PATCH v2 3/3] mm: shmem: move shmem_huge_global_enabled() into shmem_allowable_huge_orders() Baolin Wang
2024-07-15 13:36   ` Ryan Roberts [this message]
2024-07-22  2:41     ` Baolin Wang
2024-07-25 13:09       ` David Hildenbrand
2024-07-26  1:09         ` Baolin Wang
2024-07-24 19:14 ` [PATCH v2 0/3] Some cleanups for shmem Andrew Morton
2024-07-24 19:15   ` Andrew Morton
