From: Yang Shi <shy828301@gmail.com>
To: "Zach O'Keefe" <zokeefe@google.com>
Cc: Alex Shi <alex.shi@linux.alibaba.com>,
	David Hildenbrand <david@redhat.com>,
	 David Rientjes <rientjes@google.com>,
	Matthew Wilcox <willy@infradead.org>,
	 Michal Hocko <mhocko@suse.com>,
	Pasha Tatashin <pasha.tatashin@soleen.com>,
	 Peter Xu <peterx@redhat.com>,
	Rongwei Wang <rongwei.wang@linux.alibaba.com>,
	 SeongJae Park <sj@kernel.org>, Song Liu <songliubraving@fb.com>,
	Vlastimil Babka <vbabka@suse.cz>,  Zi Yan <ziy@nvidia.com>,
	Linux MM <linux-mm@kvack.org>,
	 Andrea Arcangeli <aarcange@redhat.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	 Arnd Bergmann <arnd@arndb.de>,
	Axel Rasmussen <axelrasmussen@google.com>,
	 Chris Kennelly <ckennelly@google.com>,
	Chris Zankel <chris@zankel.net>, Helge Deller <deller@gmx.de>,
	 Hugh Dickins <hughd@google.com>,
	Ivan Kokshaysky <ink@jurassic.park.msu.ru>,
	 "James E.J. Bottomley" <James.Bottomley@hansenpartnership.com>,
	Jens Axboe <axboe@kernel.dk>,
	 "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>,
	Matt Turner <mattst88@gmail.com>,
	 Max Filippov <jcmvbkbc@gmail.com>,
	Miaohe Lin <linmiaohe@huawei.com>,
	 Minchan Kim <minchan@kernel.org>,
	Patrick Xia <patrickx@google.com>,
	 Pavel Begunkov <asml.silence@gmail.com>,
	Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Subject: Re: [PATCH v6 10/15] mm/khugepaged: rename prefix of shared collapse functions
Date: Mon, 6 Jun 2022 16:56:25 -0700
Message-ID: <CAHbLzkpUnxVmRU9T5RxnMVV0epcQioPbfor6B5m2iR8bLO6jsA@mail.gmail.com>
In-Reply-To: <20220604004004.954674-11-zokeefe@google.com>

On Fri, Jun 3, 2022 at 5:40 PM Zach O'Keefe <zokeefe@google.com> wrote:
>
> The following functions/tracepoints are shared between khugepaged and
> madvise collapse contexts.  Replace the "khugepaged_" prefix with
> generic "hpage_collapse_" prefix in such cases:
>
> khugepaged_test_exit() -> hpage_collapse_test_exit()
> khugepaged_scan_abort() -> hpage_collapse_scan_abort()
> khugepaged_scan_pmd() -> hpage_collapse_scan_pmd()
> khugepaged_find_target_node() -> hpage_collapse_find_target_node()
> khugepaged_alloc_page() -> hpage_collapse_alloc_page()
> huge_memory:mm_khugepaged_scan_pmd ->
>         huge_memory:mm_hpage_collapse_scan_pmd
>
> Signed-off-by: Zach O'Keefe <zokeefe@google.com>
> ---
>  include/trace/events/huge_memory.h |  2 +-
>  mm/khugepaged.c                    | 71 ++++++++++++++++--------------
>  2 files changed, 38 insertions(+), 35 deletions(-)
>
> diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
> index 55392bf30a03..fb6c73632ff3 100644
> --- a/include/trace/events/huge_memory.h
> +++ b/include/trace/events/huge_memory.h
> @@ -48,7 +48,7 @@ SCAN_STATUS
>  #define EM(a, b)       {a, b},
>  #define EMe(a, b)      {a, b}
>
> -TRACE_EVENT(mm_khugepaged_scan_pmd,
> +TRACE_EVENT(mm_hpage_collapse_scan_pmd,

You may not want to change the name of the tracepoint since it is part of
the kernel ABI. Otherwise the patch looks good to me.
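To make the ABI concern concrete, here is a minimal, hypothetical C sketch
(not part of this series) of how userspace tooling typically resolves a
tracepoint by its literal "<system>:<event>" name through tracefs before
attaching with perf_event_open(); the path and mechanism below are the
conventional ones, assumed purely for illustration. Any tool that hard-codes
huge_memory:mm_khugepaged_scan_pmd this way silently breaks if the event is
renamed:

/* Hypothetical consumer: look up the tracepoint id by its event name. */
#include <stdio.h>

int main(void)
{
	/*
	 * Conventional tracefs mount point; older setups may use
	 * /sys/kernel/debug/tracing instead.
	 */
	const char *id_path =
		"/sys/kernel/tracing/events/huge_memory/mm_khugepaged_scan_pmd/id";
	FILE *f = fopen(id_path, "r");
	int id;

	if (!f) {
		perror("tracepoint missing or renamed");
		return 1;
	}
	if (fscanf(f, "%d", &id) == 1)
		/* This id is what gets passed as attr.config to perf_event_open(). */
		printf("tracepoint id %d\n", id);
	fclose(f);
	return 0;
}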
Reviewed-by: Yang Shi <shy828301@gmail.com>

>
>         TP_PROTO(struct mm_struct *mm, struct page *page, bool writable,
>                  int referenced, int none_or_zero, int status, int unmapped),
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 073d6bb03b37..119c1bc84af7 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -102,7 +102,7 @@ struct collapse_control {
>         /* Num pages scanned per node */
>         int node_load[MAX_NUMNODES];
>
> -       /* Last target selected in khugepaged_find_target_node() */
> +       /* Last target selected in hpage_collapse_find_target_node() */
>         int last_target_node;
>
>         /* gfp used for allocation and memcg charging */
> @@ -456,7 +456,7 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm,
>         hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
>  }
>
> -static inline int khugepaged_test_exit(struct mm_struct *mm)
> +static inline int hpage_collapse_test_exit(struct mm_struct *mm)
>  {
>         return atomic_read(&mm->mm_users) == 0;
>  }
> @@ -508,7 +508,7 @@ void __khugepaged_enter(struct mm_struct *mm)
>                 return;
>
>         /* __khugepaged_exit() must not run from under us */
> -       VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
> +       VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
>         if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
>                 free_mm_slot(mm_slot);
>                 return;
> @@ -562,11 +562,10 @@ void __khugepaged_exit(struct mm_struct *mm)
>         } else if (mm_slot) {
>                 /*
>                  * This is required to serialize against
> -                * khugepaged_test_exit() (which is guaranteed to run
> -                * under mmap sem read mode). Stop here (after we
> -                * return all pagetables will be destroyed) until
> -                * khugepaged has finished working on the pagetables
> -                * under the mmap_lock.
> +                * hpage_collapse_test_exit() (which is guaranteed to run
> +                * under mmap sem read mode). Stop here (after we return all
> +                * pagetables will be destroyed) until khugepaged has finished
> +                * working on the pagetables under the mmap_lock.
>                  */
>                 mmap_write_lock(mm);
>                 mmap_write_unlock(mm);
> @@ -803,7 +802,7 @@ static void khugepaged_alloc_sleep(void)
>         remove_wait_queue(&khugepaged_wait, &wait);
>  }
>
> -static bool khugepaged_scan_abort(int nid, struct collapse_control *cc)
> +static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
>  {
>         int i;
>
> @@ -834,7 +833,7 @@ static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
>  }
>
>  #ifdef CONFIG_NUMA
> -static int khugepaged_find_target_node(struct collapse_control *cc)
> +static int hpage_collapse_find_target_node(struct collapse_control *cc)
>  {
>         int nid, target_node = 0, max_value = 0;
>
> @@ -858,7 +857,7 @@ static int khugepaged_find_target_node(struct collapse_control *cc)
>         return target_node;
>  }
>  #else
> -static int khugepaged_find_target_node(struct collapse_control *cc)
> +static int hpage_collapse_find_target_node(struct collapse_control *cc)
>  {
>         return 0;
>  }
> @@ -877,7 +876,7 @@ static bool alloc_fail_should_sleep(int result, bool *wait)
>         return false;
>  }
>
> -static bool khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
> +static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node)
>  {
>         *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
>         if (unlikely(!*hpage)) {
> @@ -905,7 +904,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
>         unsigned long hstart, hend;
>         unsigned long vma_flags;
>
> -       if (unlikely(khugepaged_test_exit(mm)))
> +       if (unlikely(hpage_collapse_test_exit(mm)))
>                 return SCAN_ANY_PROCESS;
>
>         *vmap = vma = find_vma(mm, address);
> @@ -962,7 +961,7 @@ static int find_pmd_or_thp_or_none(struct mm_struct *mm,
>
>  /*
>   * Bring missing pages in from swap, to complete THP collapse.
> - * Only done if khugepaged_scan_pmd believes it is worthwhile.
> + * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
>   *
>   * Called and returns without pte mapped or spinlocks held,
>   * but with mmap_lock held to protect against vma changes.
> @@ -1027,9 +1026,9 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
>  static int alloc_charge_hpage(struct mm_struct *mm, struct page **hpage,
>                               struct collapse_control *cc)
>  {
> -       int node = khugepaged_find_target_node(cc);
> +       int node = hpage_collapse_find_target_node(cc);
>
> -       if (!khugepaged_alloc_page(hpage, cc->gfp, node))
> +       if (!hpage_collapse_alloc_page(hpage, cc->gfp, node))
>                 return SCAN_ALLOC_HUGE_PAGE_FAIL;
>         if (unlikely(mem_cgroup_charge(page_folio(*hpage), mm, cc->gfp)))
>                 return SCAN_CGROUP_CHARGE_FAIL;
> @@ -1188,9 +1187,10 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>         return result;
>  }
>
> -static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
> -                              unsigned long address, bool *mmap_locked,
> -                              struct collapse_control *cc)
> +static int hpage_collapse_scan_pmd(struct mm_struct *mm,
> +                                  struct vm_area_struct *vma,
> +                                  unsigned long address, bool *mmap_locked,
> +                                  struct collapse_control *cc)
>  {
>         pmd_t *pmd;
>         pte_t *pte, *_pte;
> @@ -1282,7 +1282,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
>                  * hit record.
>                  */
>                 node = page_to_nid(page);
> -               if (khugepaged_scan_abort(node, cc)) {
> +               if (hpage_collapse_scan_abort(node, cc)) {
>                         result = SCAN_SCAN_ABORT;
>                         goto out_unmap;
>                 }
> @@ -1345,8 +1345,8 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
>                                             unmapped, cc);
>         }
>  out:
> -       trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
> -                                    none_or_zero, result, unmapped);
> +       trace_mm_hpage_collapse_scan_pmd(mm, page, writable, referenced,
> +                                        none_or_zero, result, unmapped);
>         return result;
>  }
>
> @@ -1356,7 +1356,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
>
>         lockdep_assert_held(&khugepaged_mm_lock);
>
> -       if (khugepaged_test_exit(mm)) {
> +       if (hpage_collapse_test_exit(mm)) {
>                 /* free mm_slot */
>                 hash_del(&mm_slot->hash);
>                 list_del(&mm_slot->mm_node);
> @@ -1530,7 +1530,7 @@ static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
>         if (!mmap_write_trylock(mm))
>                 return;
>
> -       if (unlikely(khugepaged_test_exit(mm)))
> +       if (unlikely(hpage_collapse_test_exit(mm)))
>                 goto out;
>
>         for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
> @@ -1593,7 +1593,8 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
>                          * it'll always mapped in small page size for uffd-wp
>                          * registered ranges.
>                          */
> -                       if (!khugepaged_test_exit(mm) && !userfaultfd_wp(vma))
> +                       if (!hpage_collapse_test_exit(mm) &&
> +                           !userfaultfd_wp(vma))
>                                 collapse_and_free_pmd(mm, vma, addr, pmd);
>                         mmap_write_unlock(mm);
>                 } else {
> @@ -2020,7 +2021,7 @@ static int khugepaged_scan_file(struct mm_struct *mm, struct file *file,
>                 }
>
>                 node = page_to_nid(page);
> -               if (khugepaged_scan_abort(node, cc)) {
> +               if (hpage_collapse_scan_abort(node, cc)) {
>                         result = SCAN_SCAN_ABORT;
>                         break;
>                 }
> @@ -2114,7 +2115,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
>                 goto breakouterloop_mmap_lock;
>
>         progress++;
> -       if (unlikely(khugepaged_test_exit(mm)))
> +       if (unlikely(hpage_collapse_test_exit(mm)))
>                 goto breakouterloop;
>
>         address = khugepaged_scan.address;
> @@ -2123,7 +2124,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
>                 unsigned long hstart, hend;
>
>                 cond_resched();
> -               if (unlikely(khugepaged_test_exit(mm))) {
> +               if (unlikely(hpage_collapse_test_exit(mm))) {
>                         progress++;
>                         break;
>                 }
> @@ -2148,7 +2149,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
>                         bool mmap_locked = true;
>
>                         cond_resched();
> -                       if (unlikely(khugepaged_test_exit(mm)))
> +                       if (unlikely(hpage_collapse_test_exit(mm)))
>                                 goto breakouterloop;
>
>                         /* reset gfp flags since sysfs settings might change */
> @@ -2168,9 +2169,10 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
>                                                                cc);
>                                 fput(file);
>                         } else {
> -                               *result = khugepaged_scan_pmd(mm, vma,
> -                                                             khugepaged_scan.address,
> -                                                             &mmap_locked, cc);
> +                               *result = hpage_collapse_scan_pmd(mm, vma,
> +                                                                 khugepaged_scan.address,
> +                                                                 &mmap_locked,
> +                                                                 cc);
>                         }
>                         if (*result == SCAN_SUCCEED)
>                                 ++khugepaged_pages_collapsed;
> @@ -2200,7 +2202,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
>          * Release the current mm_slot if this mm is about to die, or
>          * if we scanned all vmas of this mm.
>          */
> -       if (khugepaged_test_exit(mm) || !vma) {
> +       if (hpage_collapse_test_exit(mm) || !vma) {
>                 /*
>                  * Make sure that if mm_users is reaching zero while
>                  * khugepaged runs here, khugepaged_exit will find
> @@ -2482,7 +2484,8 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
>                 }
>                 mmap_assert_locked(mm);
>                 memset(cc.node_load, 0, sizeof(cc.node_load));
> -               result = khugepaged_scan_pmd(mm, vma, addr, &mmap_locked, &cc);
> +               result = hpage_collapse_scan_pmd(mm, vma, addr, &mmap_locked,
> +                                                &cc);
>                 if (!mmap_locked)
>                         *prev = NULL;  /* Tell caller we dropped mmap_lock */
>
> --
> 2.36.1.255.ge46751e96f-goog
>



Thread overview: 55+ messages
2022-06-04  0:39 [PATCH v6 00/15] mm: userspace hugepage collapse Zach O'Keefe
2022-06-04  0:39 ` [PATCH v6 01/15] mm: khugepaged: don't carry huge page to the next loop for !CONFIG_NUMA Zach O'Keefe
2022-06-06 18:25   ` Yang Shi
2022-06-29 20:49   ` Peter Xu
2022-06-30  1:15     ` Zach O'Keefe
2022-06-04  0:39 ` [PATCH v6 02/15] mm/khugepaged: record SCAN_PMD_MAPPED when scan_pmd() finds THP Zach O'Keefe
2022-06-06 20:45   ` Yang Shi
2022-06-07 16:01     ` Zach O'Keefe
2022-06-07 19:32       ` Zach O'Keefe
2022-06-07 21:27         ` Yang Shi
2022-06-08  0:27           ` Zach O'Keefe
2022-06-04  0:39 ` [PATCH v6 03/15] mm/khugepaged: add struct collapse_control Zach O'Keefe
2022-06-06  2:41   ` kernel test robot
2022-06-06 16:40     ` Zach O'Keefe
2022-06-06 20:20       ` Yang Shi
2022-06-06 21:22         ` Yang Shi
2022-06-06 22:23       ` Andrew Morton
2022-06-06 23:53         ` Yang Shi
2022-06-08  0:42           ` Zach O'Keefe
2022-06-08  1:00             ` Yang Shi
2022-06-08  1:06               ` Zach O'Keefe
2022-06-04  0:39 ` [PATCH v6 04/15] mm/khugepaged: dedup and simplify hugepage alloc and charging Zach O'Keefe
2022-06-06 20:50   ` Yang Shi
2022-06-29 21:58   ` Peter Xu
2022-06-30 20:14     ` Zach O'Keefe
2022-06-04  0:39 ` [PATCH v6 05/15] mm/khugepaged: make allocation semantics context-specific Zach O'Keefe
2022-06-06 20:58   ` Yang Shi
2022-06-07 19:56     ` Zach O'Keefe
2022-06-04  0:39 ` [PATCH v6 06/15] mm/khugepaged: pipe enum scan_result codes back to callers Zach O'Keefe
2022-06-06 22:39   ` Yang Shi
2022-06-07  0:17     ` Zach O'Keefe
2022-06-04  0:39 ` [PATCH v6 07/15] mm/khugepaged: add flag to ignore khugepaged heuristics Zach O'Keefe
2022-06-06 22:51   ` Yang Shi
2022-06-04  0:39 ` [PATCH v6 08/15] mm/khugepaged: add flag to ignore THP sysfs enabled Zach O'Keefe
2022-06-06 23:02   ` Yang Shi
     [not found]   ` <YrzehlUoo2iMMLC2@xz-m1.local>
     [not found]     ` <CAAa6QmRXD5KboM8=ZZRPThOmcLEPtxzf0XyjkCeY_vgR7VOPqg@mail.gmail.com>
2022-06-30  2:32       ` Peter Xu
2022-06-30 14:17         ` Zach O'Keefe
2022-06-04  0:39 ` [PATCH v6 09/15] mm/madvise: introduce MADV_COLLAPSE sync hugepage collapse Zach O'Keefe
2022-06-06 23:53   ` Yang Shi
2022-06-07 22:48     ` Zach O'Keefe
2022-06-08  0:39       ` Yang Shi
2022-06-09 17:35         ` Zach O'Keefe
2022-06-09 18:51           ` Yang Shi
2022-06-10 14:51             ` Zach O'Keefe
2022-06-04  0:39 ` [PATCH v6 10/15] mm/khugepaged: rename prefix of shared collapse functions Zach O'Keefe
2022-06-06 23:56   ` Yang Shi [this message]
2022-06-07  0:31     ` Zach O'Keefe
2022-06-04  0:40 ` [PATCH v6 11/15] mm/madvise: add MADV_COLLAPSE to process_madvise() Zach O'Keefe
2022-06-07 19:14   ` Yang Shi
2022-06-04  0:40 ` [PATCH v6 12/15] selftests/vm: modularize collapse selftests Zach O'Keefe
2022-06-04  0:40 ` [PATCH v6 13/15] selftests/vm: add MADV_COLLAPSE collapse context to selftests Zach O'Keefe
2022-06-04  0:40 ` [PATCH v6 14/15] selftests/vm: add selftest to verify recollapse of THPs Zach O'Keefe
2022-06-04  0:40 ` [PATCH v6 15/15] tools headers uapi: add MADV_COLLAPSE madvise mode to tools Zach O'Keefe
2022-06-06 23:58   ` Yang Shi
2022-06-07  0:24     ` Zach O'Keefe
