From: Barry Song <baohua@kernel.org>
To: Ryan Roberts <ryan.roberts@arm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>,
	Hugh Dickins <hughd@google.com>,
	 Jonathan Corbet <corbet@lwn.net>,
	"Matthew Wilcox (Oracle)" <willy@infradead.org>,
	 David Hildenbrand <david@redhat.com>,
	Lance Yang <ioworker0@gmail.com>,
	 Baolin Wang <baolin.wang@linux.alibaba.com>,
	linux-kernel@vger.kernel.org,  linux-mm@kvack.org
Subject: Re: [PATCH v1 1/2] mm: Cleanup count_mthp_stat() definition
Date: Thu, 11 Jul 2024 20:20:11 +1200
Message-ID: <CAGsJ_4zzZAjsBU0H9yL55pKW5yym4D+YExTVqXC=wkyC4ixnMQ@mail.gmail.com>
In-Reply-To: <20240711072929.3590000-2-ryan.roberts@arm.com>

On Thu, Jul 11, 2024 at 7:29 PM Ryan Roberts <ryan.roberts@arm.com> wrote:
>
> Let's move count_mthp_stat() so that it's always defined, even when THP
> is disabled. Previously, uses of the function in files such as shmem.c,
> which are compiled even when THP is disabled, required ugly THP
> ifdeffery. With this cleanup, we can remove those ifdefs and the
> function resolves to a nop when THP is disabled.
>
> I shortly plan to call count_mthp_stat() from more THP-invariant source
> files.
>
> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>

Acked-by: Barry Song <baohua@kernel.org>

> ---
>  include/linux/huge_mm.h | 70 ++++++++++++++++++++---------------------
>  mm/memory.c             |  2 --
>  mm/shmem.c              |  6 ----
>  3 files changed, 35 insertions(+), 43 deletions(-)
>
> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
> index cff002be83eb..cb93b9009ce4 100644
> --- a/include/linux/huge_mm.h
> +++ b/include/linux/huge_mm.h
> @@ -108,6 +108,41 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
>  #define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))
>  #define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT)
>
> +enum mthp_stat_item {
> +       MTHP_STAT_ANON_FAULT_ALLOC,
> +       MTHP_STAT_ANON_FAULT_FALLBACK,
> +       MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
> +       MTHP_STAT_SWPOUT,
> +       MTHP_STAT_SWPOUT_FALLBACK,
> +       MTHP_STAT_SHMEM_ALLOC,
> +       MTHP_STAT_SHMEM_FALLBACK,
> +       MTHP_STAT_SHMEM_FALLBACK_CHARGE,
> +       MTHP_STAT_SPLIT,
> +       MTHP_STAT_SPLIT_FAILED,
> +       MTHP_STAT_SPLIT_DEFERRED,
> +       __MTHP_STAT_COUNT
> +};
> +
> +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
> +struct mthp_stat {
> +       unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
> +};
> +
> +DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
> +
> +static inline void count_mthp_stat(int order, enum mthp_stat_item item)
> +{
> +       if (order <= 0 || order > PMD_ORDER)
> +               return;
> +
> +       this_cpu_inc(mthp_stats.stats[order][item]);
> +}
> +#else
> +static inline void count_mthp_stat(int order, enum mthp_stat_item item)
> +{
> +}
> +#endif
> +
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>
>  extern unsigned long transparent_hugepage_flags;
> @@ -263,41 +298,6 @@ struct thpsize {
>
>  #define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)
>
> -enum mthp_stat_item {
> -       MTHP_STAT_ANON_FAULT_ALLOC,
> -       MTHP_STAT_ANON_FAULT_FALLBACK,
> -       MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
> -       MTHP_STAT_SWPOUT,
> -       MTHP_STAT_SWPOUT_FALLBACK,
> -       MTHP_STAT_SHMEM_ALLOC,
> -       MTHP_STAT_SHMEM_FALLBACK,
> -       MTHP_STAT_SHMEM_FALLBACK_CHARGE,
> -       MTHP_STAT_SPLIT,
> -       MTHP_STAT_SPLIT_FAILED,
> -       MTHP_STAT_SPLIT_DEFERRED,
> -       __MTHP_STAT_COUNT
> -};
> -
> -struct mthp_stat {
> -       unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
> -};
> -
> -#ifdef CONFIG_SYSFS
> -DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
> -
> -static inline void count_mthp_stat(int order, enum mthp_stat_item item)
> -{
> -       if (order <= 0 || order > PMD_ORDER)
> -               return;
> -
> -       this_cpu_inc(mthp_stats.stats[order][item]);
> -}
> -#else
> -static inline void count_mthp_stat(int order, enum mthp_stat_item item)
> -{
> -}
> -#endif
> -
>  #define transparent_hugepage_use_zero_page()                           \
>         (transparent_hugepage_flags &                                   \
>          (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
> diff --git a/mm/memory.c b/mm/memory.c
> index 802d0d8a40f9..a50fdefb8f0b 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -4597,9 +4597,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
>
>         folio_ref_add(folio, nr_pages - 1);
>         add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
> -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
>         count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC);
> -#endif
>         folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
>         folio_add_lru_vma(folio, vma);
>  setpte:
> diff --git a/mm/shmem.c b/mm/shmem.c
> index f24dfbd387ba..fce1343f44e6 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -1776,9 +1776,7 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
>
>                         if (pages == HPAGE_PMD_NR)
>                                 count_vm_event(THP_FILE_FALLBACK);
> -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
>                         count_mthp_stat(order, MTHP_STAT_SHMEM_FALLBACK);
> -#endif
>                         order = next_order(&suitable_orders, order);
>                 }
>         } else {
> @@ -1803,10 +1801,8 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
>                                 count_vm_event(THP_FILE_FALLBACK);
>                                 count_vm_event(THP_FILE_FALLBACK_CHARGE);
>                         }
> -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
>                         count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK);
>                         count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK_CHARGE);
> -#endif
>                 }
>                 goto unlock;
>         }
> @@ -2180,9 +2176,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
>                 if (!IS_ERR(folio)) {
>                         if (folio_test_pmd_mappable(folio))
>                                 count_vm_event(THP_FILE_ALLOC);
> -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
>                         count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_ALLOC);
> -#endif
>                         goto alloced;
>                 }
>                 if (PTR_ERR(folio) == -EEXIST)
> --
> 2.43.0
>

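The cleanup above relies on a common kernel pattern: declare the stats API unconditionally, and let it compile down to an empty static inline stub when the feature is configured out, so call sites never need an #ifdef. Below is a minimal, standalone sketch of that pattern under assumed names; CONFIG_FEATURE_X, count_feature_stat() and the FEATURE_STAT_* items are hypothetical illustrations of the idea, not the actual huge_mm.h identifiers.

/* Build with: cc demo.c                    (feature compiled out, calls are no-ops)
 *         or: cc -DCONFIG_FEATURE_X demo.c (feature enabled, stats are counted)
 */
#include <stdio.h>

enum feature_stat_item {
	FEATURE_STAT_ALLOC,
	FEATURE_STAT_FALLBACK,
	__FEATURE_STAT_COUNT
};

#ifdef CONFIG_FEATURE_X
static unsigned long feature_stats[__FEATURE_STAT_COUNT];

static inline void count_feature_stat(enum feature_stat_item item)
{
	feature_stats[item]++;
}
#else
/* Feature compiled out: same signature, empty body, so callers still build. */
static inline void count_feature_stat(enum feature_stat_item item)
{
	(void)item;
}
#endif

int main(void)
{
	/* No #ifdef at the call site, which is what the patch buys shmem.c and memory.c. */
	count_feature_stat(FEATURE_STAT_ALLOC);
	puts("ok");
	return 0;
}

Either configuration compiles the same caller unchanged, which is the point of moving count_mthp_stat() outside the CONFIG_TRANSPARENT_HUGEPAGE block.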


Thread overview: 28+ messages
2024-07-11  7:29 [PATCH v1 0/2] mTHP allocation stats for file-backed memory Ryan Roberts
2024-07-11  7:29 ` [PATCH v1 1/2] mm: Cleanup count_mthp_stat() definition Ryan Roberts
2024-07-11  8:20   ` Barry Song [this message]
2024-07-12  2:31   ` Baolin Wang
2024-07-12 11:57   ` Lance Yang
2024-07-11  7:29 ` [PATCH v1 2/2] mm: mTHP stats for pagecache folio allocations Ryan Roberts
2024-07-12  3:00   ` Baolin Wang
2024-07-12 12:22     ` Lance Yang
2024-07-13  1:08       ` David Hildenbrand
2024-07-13 10:45         ` Ryan Roberts
2024-07-16  8:31           ` Ryan Roberts
2024-07-16 10:19             ` David Hildenbrand
2024-07-16 11:14               ` Ryan Roberts
2024-07-17  8:02                 ` David Hildenbrand
2024-07-17  8:29                   ` Ryan Roberts
2024-07-17  8:44                     ` David Hildenbrand
2024-07-17  9:50                       ` Ryan Roberts
2024-07-17 10:03                         ` David Hildenbrand
2024-07-17 10:18                           ` Ryan Roberts
2024-07-17 10:25                             ` David Hildenbrand
2024-07-17 10:48                               ` Ryan Roberts
2024-07-13 11:00     ` Ryan Roberts
2024-07-13 12:54       ` Baolin Wang
2024-07-14  9:05         ` Ryan Roberts
2024-07-22  3:52           ` Baolin Wang
2024-07-22  7:36             ` Ryan Roberts
2024-07-12 22:44   ` kernel test robot
2024-07-15 13:55     ` Ryan Roberts
