The Linux Kernel Mailing List
 help / color / mirror / Atom feed
From: "Michael S. Tsirkin" <mst@redhat.com>
To: linux-kernel@vger.kernel.org
Cc: "David Hildenbrand (Arm)" <david@kernel.org>,
	"Jason Wang" <jasowang@redhat.com>,
	"Xuan Zhuo" <xuanzhuo@linux.alibaba.com>,
	"Eugenio Pérez" <eperezma@redhat.com>,
	"Muchun Song" <muchun.song@linux.dev>,
	"Oscar Salvador" <osalvador@suse.de>,
	"Andrew Morton" <akpm@linux-foundation.org>,
	"Lorenzo Stoakes" <ljs@kernel.org>,
	"Liam R. Howlett" <Liam.Howlett@oracle.com>,
	"Vlastimil Babka" <vbabka@kernel.org>,
	"Mike Rapoport" <rppt@kernel.org>,
	"Suren Baghdasaryan" <surenb@google.com>,
	"Michal Hocko" <mhocko@suse.com>,
	"Brendan Jackman" <jackmanb@google.com>,
	"Johannes Weiner" <hannes@cmpxchg.org>, "Zi Yan" <ziy@nvidia.com>,
	"Baolin Wang" <baolin.wang@linux.alibaba.com>,
	"Nico Pache" <npache@redhat.com>,
	"Ryan Roberts" <ryan.roberts@arm.com>,
	"Dev Jain" <dev.jain@arm.com>, "Barry Song" <baohua@kernel.org>,
	"Lance Yang" <lance.yang@linux.dev>,
	"Hugh Dickins" <hughd@google.com>,
	"Matthew Brost" <matthew.brost@intel.com>,
	"Joshua Hahn" <joshua.hahnjy@gmail.com>,
	"Rakie Kim" <rakie.kim@sk.com>,
	"Byungchul Park" <byungchul@sk.com>,
	"Gregory Price" <gourry@gourry.net>,
	"Ying Huang" <ying.huang@linux.alibaba.com>,
	"Alistair Popple" <apopple@nvidia.com>,
	"Christoph Lameter" <cl@gentwo.org>,
	"David Rientjes" <rientjes@google.com>,
	"Roman Gushchin" <roman.gushchin@linux.dev>,
	"Harry Yoo" <harry.yoo@oracle.com>,
	"Axel Rasmussen" <axelrasmussen@google.com>,
	"Yuanchu Xie" <yuanchu@google.com>, "Wei Xu" <weixugc@google.com>,
	"Chris Li" <chrisl@kernel.org>,
	"Kairui Song" <kasong@tencent.com>,
	"Kemeng Shi" <shikemeng@huaweicloud.com>,
	"Nhat Pham" <nphamcs@gmail.com>, "Baoquan He" <bhe@redhat.com>,
	virtualization@lists.linux.dev, linux-mm@kvack.org,
	"Andrea Arcangeli" <aarcange@redhat.com>
Subject: [PATCH resend v6 15/30] mm: memfd: skip zeroing for zeroed hugetlb pool pages
Date: Mon, 11 May 2026 05:03:08 -0400	[thread overview]
Message-ID: <a39ab8288aceb244bdb31141aa9cf67106c0e042.1778489843.git.mst@redhat.com> (raw)
In-Reply-To: <cover.1778489843.git.mst@redhat.com>

gather_surplus_pages() pre-allocates hugetlb pages into the pool
during mmap.  Pass __GFP_ZERO so these pages are zeroed by the
buddy allocator, and alloc_surplus_hugetlb_folio() sets HPG_zeroed
on them.

Add a bool *zeroed output parameter to alloc_hugetlb_folio_reserve()
so callers can check whether the pool page is known-zero.  memfd's
memfd_alloc_folio() uses this to skip the explicit folio_zero_user()
when the page is already zero.

This avoids redundant zeroing for memfd hugetlb pages that were
pre-allocated into the pool and never mapped to userspace.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Assisted-by: Claude:claude-opus-4-6
---
 include/linux/hugetlb.h |  6 ++++--
 mm/hugetlb.c            | 11 +++++++++--
 mm/memfd.c              | 14 ++++++++------
 3 files changed, 21 insertions(+), 10 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 950e1702fbd8..c4e66a371fce 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -714,7 +714,8 @@ struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
 				nodemask_t *nmask, gfp_t gfp_mask,
 				bool allow_alloc_fallback);
 struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
-					  nodemask_t *nmask, gfp_t gfp_mask);
+					  nodemask_t *nmask, gfp_t gfp_mask,
+					  bool *zeroed);
 
 int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
 			pgoff_t idx);
@@ -1142,7 +1143,8 @@ static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
 
 static inline struct folio *
 alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
-			    nodemask_t *nmask, gfp_t gfp_mask)
+			    nodemask_t *nmask, gfp_t gfp_mask,
+			    bool *zeroed)
 {
 	return NULL;
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 8710366d14b7..03ad5c1e0655 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2205,7 +2205,7 @@ struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
 }
 
 struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
-		nodemask_t *nmask, gfp_t gfp_mask)
+		nodemask_t *nmask, gfp_t gfp_mask, bool *zeroed)
 {
 	struct folio *folio;
 
@@ -2221,6 +2221,12 @@ struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
 		h->resv_huge_pages--;
 
 	spin_unlock_irq(&hugetlb_lock);
+
+	if (zeroed && folio) {
+		*zeroed = folio_test_hugetlb_zeroed(folio);
+		folio_clear_hugetlb_zeroed(folio);
+	}
+
 	return folio;
 }
 
@@ -2305,7 +2311,8 @@ static int gather_surplus_pages(struct hstate *h, long delta)
 		 * It is okay to use NUMA_NO_NODE because we use numa_mem_id()
 		 * down the road to pick the current node if that is the case.
 		 */
-		folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
+		folio = alloc_surplus_hugetlb_folio(h,
+						    htlb_alloc_mask(h) | __GFP_ZERO,
 						    NUMA_NO_NODE, &alloc_nodemask,
 						    USER_ADDR_NONE);
 		if (!folio) {
diff --git a/mm/memfd.c b/mm/memfd.c
index fb425f4e315f..5518f7d2d91f 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -69,6 +69,7 @@ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
 #ifdef CONFIG_HUGETLB_PAGE
 	struct folio *folio;
 	gfp_t gfp_mask;
+	bool zeroed;
 
 	if (is_file_hugepages(memfd)) {
 		/*
@@ -93,17 +94,18 @@ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
 		folio = alloc_hugetlb_folio_reserve(h,
 						    numa_node_id(),
 						    NULL,
-						    gfp_mask);
+						    gfp_mask,
+						    &zeroed);
 		if (folio) {
 			u32 hash;
 
 			/*
-			 * Zero the folio to prevent information leaks to userspace.
-			 * Use folio_zero_user() which is optimized for huge/gigantic
-			 * pages. Pass 0 as addr_hint since this is not a faulting path
-			 *  and we don't have a user virtual address yet.
+			 * Zero the folio to prevent information leaks to
+			 * userspace.  Skip if the pool page is known-zero
+			 * (HPG_zeroed set during pool pre-allocation).
 			 */
-			folio_zero_user(folio, 0);
+			if (!zeroed)
+				folio_zero_user(folio, 0);
 
 			/*
 			 * Mark the folio uptodate before adding to page cache,
-- 
MST


  parent reply	other threads:[~2026-05-11  9:03 UTC|newest]

Thread overview: 48+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-05-11  9:01 [PATCH resend v6 00/30] mm/virtio: skip redundant zeroing of host-zeroed pages Michael S. Tsirkin
2026-05-11  9:01 ` [PATCH resend v6 01/30] mm: move vma_alloc_folio_noprof to page_alloc.c Michael S. Tsirkin
2026-05-11 14:47   ` Gregory Price
2026-05-11 15:09     ` Michael S. Tsirkin
2026-05-11  9:01 ` [PATCH resend v6 02/30] mm: mempolicy: fix interleave index for unaligned VMA start Michael S. Tsirkin
2026-05-11 14:59   ` Gregory Price
2026-05-11 15:15     ` Michael S. Tsirkin
2026-05-11 15:26     ` Michael S. Tsirkin
2026-05-11  9:01 ` [PATCH resend v6 03/30] mm: thread user_addr through page allocator for cache-friendly zeroing Michael S. Tsirkin
2026-05-11 15:37   ` Gregory Price
2026-05-11 15:55     ` Michael S. Tsirkin
2026-05-11 16:52       ` Gregory Price
2026-05-11  9:02 ` [PATCH resend v6 04/30] mm: add folio_zero_user stub for configs without THP/HUGETLBFS Michael S. Tsirkin
2026-05-11  9:02 ` [PATCH resend v6 05/30] mm: page_alloc: move prep_compound_page before post_alloc_hook Michael S. Tsirkin
2026-05-11 15:54   ` Gregory Price
2026-05-11  9:02 ` [PATCH resend v6 06/30] mm: use folio_zero_user for user pages in post_alloc_hook Michael S. Tsirkin
2026-05-11  9:02 ` [PATCH resend v6 07/30] mm: use __GFP_ZERO in vma_alloc_zeroed_movable_folio Michael S. Tsirkin
2026-05-11  9:02 ` [PATCH resend v6 08/30] mm: remove arch vma_alloc_zeroed_movable_folio overrides Michael S. Tsirkin
2026-05-11  9:02 ` [PATCH resend v6 09/30] mm: alloc_anon_folio: pass raw fault address to vma_alloc_folio Michael S. Tsirkin
2026-05-11 16:03   ` Gregory Price
2026-05-11  9:02 ` [PATCH resend v6 10/30] mm: alloc_swap_folio: " Michael S. Tsirkin
2026-05-11 16:05   ` Gregory Price
2026-05-11 21:41     ` Michael S. Tsirkin
2026-05-11  9:02 ` [PATCH resend v6 11/30] mm: use __GFP_ZERO in alloc_anon_folio Michael S. Tsirkin
2026-05-11 16:15   ` Gregory Price
2026-05-11  9:02 ` [PATCH resend v6 12/30] mm: vma_alloc_anon_folio_pmd: pass raw fault address to vma_alloc_folio Michael S. Tsirkin
2026-05-11 16:17   ` Gregory Price
2026-05-11  9:02 ` [PATCH resend v6 13/30] mm: use __GFP_ZERO in vma_alloc_anon_folio_pmd Michael S. Tsirkin
2026-05-11 16:26   ` Gregory Price
2026-05-11  9:03 ` [PATCH resend v6 14/30] mm: hugetlb: use __GFP_ZERO and skip zeroing for zeroed pages Michael S. Tsirkin
2026-05-11 16:36   ` Gregory Price
2026-05-11  9:03 ` Michael S. Tsirkin [this message]
2026-05-11 16:39   ` [PATCH resend v6 15/30] mm: memfd: skip zeroing for zeroed hugetlb pool pages Gregory Price
2026-05-11  9:03 ` [PATCH resend v6 16/30] mm: page_reporting: allow driver to set batch capacity Michael S. Tsirkin
2026-05-11  9:03 ` [PATCH resend v6 17/30] mm: page_alloc: propagate PageReported flag across buddy splits Michael S. Tsirkin
2026-05-11  9:03 ` [PATCH resend v6 18/30] mm: page_reporting: skip redundant zeroing of host-zeroed reported pages Michael S. Tsirkin
2026-05-11  9:03 ` [PATCH resend v6 19/30] mm: page_reporting: add per-page zeroed bitmap for host feedback Michael S. Tsirkin
2026-05-11  9:03 ` [PATCH resend v6 20/30] mm: page_alloc: clear PG_zeroed on buddy merge if not both zero Michael S. Tsirkin
2026-05-11  9:03 ` [PATCH resend v6 21/30] mm: page_alloc: preserve PG_zeroed in page_del_and_expand Michael S. Tsirkin
2026-05-11  9:03 ` [PATCH resend v6 22/30] virtio_balloon: submit reported pages as individual buffers Michael S. Tsirkin
2026-05-11  9:03 ` [PATCH resend v6 23/30] mm: page_reporting: add flush parameter with page budget Michael S. Tsirkin
2026-05-11  9:03 ` [PATCH resend v6 24/30] mm: page_alloc: propagate PG_zeroed in split_large_buddy Michael S. Tsirkin
2026-05-11  9:04 ` [PATCH resend v6 25/30] virtio_balloon: skip zeroing for host-zeroed reported pages Michael S. Tsirkin
2026-05-11  9:04 ` [PATCH resend v6 26/30] virtio_balloon: disable reporting zeroed optimization for confidential guests Michael S. Tsirkin
2026-05-11  9:04 ` [PATCH resend v6 27/30] mm: add free_frozen_pages_zeroed Michael S. Tsirkin
2026-05-11  9:04 ` [PATCH resend v6 28/30] mm: add put_page_zeroed and folio_put_zeroed Michael S. Tsirkin
2026-05-11  9:04 ` [PATCH resend v6 29/30] virtio_balloon: implement VIRTIO_BALLOON_F_DEVICE_INIT_ON_INFLATE Michael S. Tsirkin
2026-05-11  9:04 ` [PATCH resend v6 30/30] mm: balloon: use put_page_zeroed for zeroed balloon pages Michael S. Tsirkin

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=a39ab8288aceb244bdb31141aa9cf67106c0e042.1778489843.git.mst@redhat.com \
    --to=mst@redhat.com \
    --cc=Liam.Howlett@oracle.com \
    --cc=aarcange@redhat.com \
    --cc=akpm@linux-foundation.org \
    --cc=apopple@nvidia.com \
    --cc=axelrasmussen@google.com \
    --cc=baohua@kernel.org \
    --cc=baolin.wang@linux.alibaba.com \
    --cc=bhe@redhat.com \
    --cc=byungchul@sk.com \
    --cc=chrisl@kernel.org \
    --cc=cl@gentwo.org \
    --cc=david@kernel.org \
    --cc=dev.jain@arm.com \
    --cc=eperezma@redhat.com \
    --cc=gourry@gourry.net \
    --cc=hannes@cmpxchg.org \
    --cc=harry.yoo@oracle.com \
    --cc=hughd@google.com \
    --cc=jackmanb@google.com \
    --cc=jasowang@redhat.com \
    --cc=joshua.hahnjy@gmail.com \
    --cc=kasong@tencent.com \
    --cc=lance.yang@linux.dev \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=ljs@kernel.org \
    --cc=matthew.brost@intel.com \
    --cc=mhocko@suse.com \
    --cc=muchun.song@linux.dev \
    --cc=npache@redhat.com \
    --cc=nphamcs@gmail.com \
    --cc=osalvador@suse.de \
    --cc=rakie.kim@sk.com \
    --cc=rientjes@google.com \
    --cc=roman.gushchin@linux.dev \
    --cc=rppt@kernel.org \
    --cc=ryan.roberts@arm.com \
    --cc=shikemeng@huaweicloud.com \
    --cc=surenb@google.com \
    --cc=vbabka@kernel.org \
    --cc=virtualization@lists.linux.dev \
    --cc=weixugc@google.com \
    --cc=xuanzhuo@linux.alibaba.com \
    --cc=ying.huang@linux.alibaba.com \
    --cc=yuanchu@google.com \
    --cc=ziy@nvidia.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox