From: Mike Rapoport <rppt@kernel.org>
To: Usama Arif <usama.arif@bytedance.com>
Cc: linux-mm@kvack.org, muchun.song@linux.dev,
mike.kravetz@oracle.com, linux-kernel@vger.kernel.org,
fam.zheng@bytedance.com, liangma@liangbit.com,
simon.evans@bytedance.com, punit.agrawal@bytedance.com
Subject: Re: [v1 1/6] mm: hugetlb: Skip prep of tail pages when HVO is enabled
Date: Sat, 29 Jul 2023 09:37:21 +0300
Message-ID: <20230729063721.GD1901145@kernel.org>
In-Reply-To: <20230727204624.1942372-2-usama.arif@bytedance.com>

On Thu, Jul 27, 2023 at 09:46:19PM +0100, Usama Arif wrote:
> When the vmemmap is optimizable, hugetlb_vmemmap_optimize will free
> all the duplicated tail pages while preparing the new hugepage.
> Hence, there is no need to prepare them.
>
> For 1G x86 hugepages, this avoids preparing
> 262144 - 64 = 262080 struct pages per hugepage.
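
For reference: on x86 a 1G hugepage spans 1G / 4K = 262144 base pages,
each with a struct page in the vmemmap, and HVO keeps only one vmemmap
page worth of them, i.e. HUGETLB_VMEMMAP_RESERVE_SIZE /
sizeof(struct page) = 4096 / 64 = 64.
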
>
> Signed-off-by: Usama Arif <usama.arif@bytedance.com>
> ---
> mm/hugetlb.c | 32 +++++++++++++++++++++++---------
> mm/hugetlb_vmemmap.c | 2 +-
> mm/hugetlb_vmemmap.h | 7 +++++++
> 3 files changed, 31 insertions(+), 10 deletions(-)
>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 64a3239b6407..58cf5978bee1 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1943,13 +1943,24 @@ static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int ni
> }
>
> static bool __prep_compound_gigantic_folio(struct folio *folio,
> - unsigned int order, bool demote)
> + unsigned int order, bool demote,
> + bool hvo)
I think it would be cleaner to pass struct hstate * instead of order here
so that order and hvo can be computed locally.
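
Something like this (untested sketch):

	static bool __prep_compound_gigantic_folio(struct folio *folio,
						   struct hstate *h, bool demote)
	{
		unsigned int order = huge_page_order(h);
		/* computed locally, no extra parameters at the call sites */
		bool hvo = vmemmap_should_optimize(h, &folio->page);
		int nr_pages = 1 << order;

		/* ... rest of the function unchanged ... */
	}

with the callers passing h (or target_hstate in the demote path)
instead of the order and a precomputed hvo.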
> {
> int i, j;
> int nr_pages = 1 << order;
> struct page *p;
>
> __folio_clear_reserved(folio);
> +
> +#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
> + /*
> + * No need to prep pages that will be freed later by hugetlb_vmemmap_optimize
> + * in prep_new_huge_page. Hence, reduce nr_pages to the pages that will be kept.
> + */
> + if (hvo)
if (IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP) && hvo)
is better than ifdef IMO.
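
I.e. something like the following (sketch; this assumes
HUGETLB_VMEMMAP_RESERVE_SIZE is also defined when
CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP is off, e.g. by moving the
#define out of the #ifdef in hugetlb_vmemmap.h):

	/*
	 * No need to prep the tail pages that hugetlb_vmemmap_optimize
	 * will free later; only prep the pages that will be kept.
	 */
	if (IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP) && hvo)
		nr_pages = HUGETLB_VMEMMAP_RESERVE_SIZE / sizeof(struct page);

The branch is still parsed and type-checked in both configurations and
the #ifdef/#endif pair goes away.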
> + nr_pages = HUGETLB_VMEMMAP_RESERVE_SIZE / sizeof(struct page);
> +#endif
> +
> for (i = 0; i < nr_pages; i++) {
> p = folio_page(folio, i);
>
> @@ -2020,15 +2031,15 @@ static bool __prep_compound_gigantic_folio(struct folio *folio,
> }
>
> static bool prep_compound_gigantic_folio(struct folio *folio,
> - unsigned int order)
> + unsigned int order, bool hvo)
> {
> - return __prep_compound_gigantic_folio(folio, order, false);
> + return __prep_compound_gigantic_folio(folio, order, false, hvo);
> }
>
> static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
> - unsigned int order)
> + unsigned int order, bool hvo)
> {
> - return __prep_compound_gigantic_folio(folio, order, true);
> + return __prep_compound_gigantic_folio(folio, order, true, hvo);
> }
>
> /*
> @@ -2185,7 +2196,8 @@ static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
> if (!folio)
> return NULL;
> if (hstate_is_gigantic(h)) {
> - if (!prep_compound_gigantic_folio(folio, huge_page_order(h))) {
> + if (!prep_compound_gigantic_folio(folio, huge_page_order(h),
> + vmemmap_should_optimize(h, &folio->page))) {
> /*
> * Rare failure to convert pages to compound page.
> * Free pages and try again - ONCE!
> @@ -3201,7 +3213,8 @@ static void __init gather_bootmem_prealloc(void)
>
> VM_BUG_ON(!hstate_is_gigantic(h));
> WARN_ON(folio_ref_count(folio) != 1);
> - if (prep_compound_gigantic_folio(folio, huge_page_order(h))) {
> + if (prep_compound_gigantic_folio(folio, huge_page_order(h),
> + vmemmap_should_optimize(h, page))) {
> WARN_ON(folio_test_reserved(folio));
> prep_new_hugetlb_folio(h, folio, folio_nid(folio));
> free_huge_page(page); /* add to the hugepage allocator */
> @@ -3624,8 +3637,9 @@ static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
> subpage = folio_page(folio, i);
> inner_folio = page_folio(subpage);
> if (hstate_is_gigantic(target_hstate))
> - prep_compound_gigantic_folio_for_demote(inner_folio,
> - target_hstate->order);
> +			prep_compound_gigantic_folio_for_demote(inner_folio,
> + target_hstate->order,
> + vmemmap_should_optimize(target_hstate, subpage));
> else
> prep_compound_page(subpage, target_hstate->order);
> folio_change_private(inner_folio, NULL);
> diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
> index c2007ef5e9b0..b721e87de2b3 100644
> --- a/mm/hugetlb_vmemmap.c
> +++ b/mm/hugetlb_vmemmap.c
> @@ -486,7 +486,7 @@ int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
> }
>
> /* Return true iff a HugeTLB whose vmemmap should and can be optimized. */
> -static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
> +bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
> {
> if (!READ_ONCE(vmemmap_optimize_enabled))
> return false;
> diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h
> index 25bd0e002431..07555d2dc0cb 100644
> --- a/mm/hugetlb_vmemmap.h
> +++ b/mm/hugetlb_vmemmap.h
> @@ -13,6 +13,7 @@
> #ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
> int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head);
> void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head);
> +bool vmemmap_should_optimize(const struct hstate *h, const struct page *head);
>
> /*
> * Reserve one vmemmap page, all vmemmap addresses are mapped to it. See
> @@ -51,6 +52,12 @@ static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate
> {
> return 0;
> }
> +
> +static inline bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
> +{
> + return false;
> +}
> +
> #endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */
>
> static inline bool hugetlb_vmemmap_optimizable(const struct hstate *h)
> --
> 2.25.1
>
--
Sincerely yours,
Mike.