From: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
To: David Hildenbrand <david@redhat.com>
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	xen-devel@lists.xenproject.org, linux-fsdevel@vger.kernel.org,
	nvdimm@lists.linux.dev, linuxppc-dev@lists.ozlabs.org,
	Andrew Morton <akpm@linux-foundation.org>,
	Madhavan Srinivasan <maddy@linux.ibm.com>,
	Michael Ellerman <mpe@ellerman.id.au>,
	Nicholas Piggin <npiggin@gmail.com>,
	Christophe Leroy <christophe.leroy@csgroup.eu>,
	Juergen Gross <jgross@suse.com>,
	Stefano Stabellini <sstabellini@kernel.org>,
	Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>,
	Dan Williams <dan.j.williams@intel.com>,
	Matthew Wilcox <willy@infradead.org>, Jan Kara <jack@suse.cz>,
	Alexander Viro <viro@zeniv.linux.org.uk>,
	Christian Brauner <brauner@kernel.org>,
	"Liam R. Howlett" <Liam.Howlett@oracle.com>,
	Vlastimil Babka <vbabka@suse.cz>, Mike Rapoport <rppt@kernel.org>,
	Suren Baghdasaryan <surenb@google.com>,
	Michal Hocko <mhocko@suse.com>, Zi Yan <ziy@nvidia.com>,
	Baolin Wang <baolin.wang@linux.alibaba.com>,
	Nico Pache <npache@redhat.com>,
	Ryan Roberts <ryan.roberts@arm.com>, Dev Jain <dev.jain@arm.com>,
	Barry Song <baohua@kernel.org>, Jann Horn <jannh@google.com>,
	Pedro Falcato <pfalcato@suse.de>, Hugh Dickins <hughd@google.com>,
	Oscar Salvador <osalvador@suse.de>,
	Lance Yang <lance.yang@linux.dev>
Subject: Re: [PATCH v3 07/11] mm/rmap: convert "enum rmap_level" to "enum pgtable_level"
Date: Tue, 12 Aug 2025 19:33:06 +0100
Message-ID: <591fb720-0826-498b-9370-20500242855e@lucifer.local>
In-Reply-To: <20250811112631.759341-8-david@redhat.com>

On Mon, Aug 11, 2025 at 01:26:27PM +0200, David Hildenbrand wrote:
> Let's factor the level enum out into <linux/pgtable.h>, and convert all
> checks for unsupported levels to BUILD_BUG(). The code is written such
> that force-inlining lets the compiler optimize out the level checks.
>
> Signed-off-by: David Hildenbrand <david@redhat.com>

Nice cleanup! This LGTM, so:

Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
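
As an aside for anyone reading along: the BUILD_BUG() conversion is
safe here because every caller passes a compile-time-constant level
into a force-inlined helper, so after constant propagation the
compiler can prove the default arm dead and drop it. BUILD_BUG() only
becomes a compile error if that elimination fails. A rough sketch of
the pattern (simplified, the frob() helper is made up, this is not the
actual rmap code):

	#include <linux/build_bug.h>

	static __always_inline void frob(enum pgtable_level level)
	{
		switch (level) {
		case PGTABLE_LEVEL_PTE:
			/* PTE-specific path */
			break;
		case PGTABLE_LEVEL_PMD:
			/* PMD-specific path */
			break;
		default:
			/*
			 * Dead code for every level handled above; turns
			 * into a compile-time error if a caller ever
			 * passes an unhandled or non-constant level.
			 */
			BUILD_BUG();
		}
	}

	/* Each wrapper then collapses to just its own path: */
	static inline void frob_pte(void) { frob(PGTABLE_LEVEL_PTE); }
	static inline void frob_pmd(void) { frob(PGTABLE_LEVEL_PMD); }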

> ---
>  include/linux/pgtable.h |  8 ++++++
>  include/linux/rmap.h    | 60 +++++++++++++++++++----------------------
>  mm/rmap.c               | 56 +++++++++++++++++++++-----------------
>  3 files changed, 66 insertions(+), 58 deletions(-)
>
> diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
> index 4c035637eeb77..bff5c4241bf2e 100644
> --- a/include/linux/pgtable.h
> +++ b/include/linux/pgtable.h
> @@ -1958,6 +1958,14 @@ static inline bool arch_has_pfn_modify_check(void)
>  /* Page-Table Modification Mask */
>  typedef unsigned int pgtbl_mod_mask;
>
> +enum pgtable_level {
> +	PGTABLE_LEVEL_PTE = 0,
> +	PGTABLE_LEVEL_PMD,
> +	PGTABLE_LEVEL_PUD,
> +	PGTABLE_LEVEL_P4D,
> +	PGTABLE_LEVEL_PGD,
> +};
> +
>  #endif /* !__ASSEMBLY__ */
>
>  #if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT)
> diff --git a/include/linux/rmap.h b/include/linux/rmap.h
> index 6cd020eea37a2..9d40d127bdb78 100644
> --- a/include/linux/rmap.h
> +++ b/include/linux/rmap.h
> @@ -394,18 +394,8 @@ typedef int __bitwise rmap_t;
>  /* The anonymous (sub)page is exclusive to a single process. */
>  #define RMAP_EXCLUSIVE		((__force rmap_t)BIT(0))
>
> -/*
> - * Internally, we're using an enum to specify the granularity. We make the
> - * compiler emit specialized code for each granularity.
> - */
> -enum rmap_level {
> -	RMAP_LEVEL_PTE = 0,
> -	RMAP_LEVEL_PMD,
> -	RMAP_LEVEL_PUD,
> -};
> -
>  static inline void __folio_rmap_sanity_checks(const struct folio *folio,
> -		const struct page *page, int nr_pages, enum rmap_level level)
> +		const struct page *page, int nr_pages, enum pgtable_level level)
>  {
>  	/* hugetlb folios are handled separately. */
>  	VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
> @@ -427,18 +417,18 @@ static inline void __folio_rmap_sanity_checks(const struct folio *folio,
>  	VM_WARN_ON_FOLIO(page_folio(page + nr_pages - 1) != folio, folio);
>
>  	switch (level) {
> -	case RMAP_LEVEL_PTE:
> +	case PGTABLE_LEVEL_PTE:
>  		break;
> -	case RMAP_LEVEL_PMD:
> +	case PGTABLE_LEVEL_PMD:
>  		/*
>  		 * We don't support folios larger than a single PMD yet. So
> -		 * when RMAP_LEVEL_PMD is set, we assume that we are creating
> +		 * when PGTABLE_LEVEL_PMD is set, we assume that we are creating
>  		 * a single "entire" mapping of the folio.
>  		 */
>  		VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PMD_NR, folio);
>  		VM_WARN_ON_FOLIO(nr_pages != HPAGE_PMD_NR, folio);
>  		break;
> -	case RMAP_LEVEL_PUD:
> +	case PGTABLE_LEVEL_PUD:
>  		/*
>  		 * Assume that we are creating a single "entire" mapping of the
>  		 * folio.
> @@ -447,7 +437,7 @@ static inline void __folio_rmap_sanity_checks(const struct folio *folio,
>  		VM_WARN_ON_FOLIO(nr_pages != HPAGE_PUD_NR, folio);
>  		break;
>  	default:
> -		VM_WARN_ON_ONCE(true);
> +		BUILD_BUG();
>  	}
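
Nice that this goes from a once-only runtime warning to a hard
compile-time failure: if somebody later adds, say, a P4D caller
without extending this switch, the build breaks rather than warning
at runtime. Something like the following (hypothetical) call would
now fail to compile:

	__folio_rmap_sanity_checks(folio, page, 1, PGTABLE_LEVEL_P4D);

(This relies on the optimizer propagating the constant level, which
is why these helpers must stay inline.)
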
>
>  	/*
> @@ -567,14 +557,14 @@ static inline void hugetlb_remove_rmap(struct folio *folio)
>
>  static __always_inline void __folio_dup_file_rmap(struct folio *folio,
>  		struct page *page, int nr_pages, struct vm_area_struct *dst_vma,
> -		enum rmap_level level)
> +		enum pgtable_level level)
>  {
>  	const int orig_nr_pages = nr_pages;
>
>  	__folio_rmap_sanity_checks(folio, page, nr_pages, level);
>
>  	switch (level) {
> -	case RMAP_LEVEL_PTE:
> +	case PGTABLE_LEVEL_PTE:
>  		if (!folio_test_large(folio)) {
>  			atomic_inc(&folio->_mapcount);
>  			break;
> @@ -587,11 +577,13 @@ static __always_inline void __folio_dup_file_rmap(struct folio *folio,
>  		}
>  		folio_add_large_mapcount(folio, orig_nr_pages, dst_vma);
>  		break;
> -	case RMAP_LEVEL_PMD:
> -	case RMAP_LEVEL_PUD:
> +	case PGTABLE_LEVEL_PMD:
> +	case PGTABLE_LEVEL_PUD:
>  		atomic_inc(&folio->_entire_mapcount);
>  		folio_inc_large_mapcount(folio, dst_vma);
>  		break;
> +	default:
> +		BUILD_BUG();
>  	}
>  }
>
> @@ -609,13 +601,13 @@ static __always_inline void __folio_dup_file_rmap(struct folio *folio,
>  static inline void folio_dup_file_rmap_ptes(struct folio *folio,
>  		struct page *page, int nr_pages, struct vm_area_struct *dst_vma)
>  {
> -	__folio_dup_file_rmap(folio, page, nr_pages, dst_vma, RMAP_LEVEL_PTE);
> +	__folio_dup_file_rmap(folio, page, nr_pages, dst_vma, PGTABLE_LEVEL_PTE);
>  }
>
>  static __always_inline void folio_dup_file_rmap_pte(struct folio *folio,
>  		struct page *page, struct vm_area_struct *dst_vma)
>  {
> -	__folio_dup_file_rmap(folio, page, 1, dst_vma, RMAP_LEVEL_PTE);
> +	__folio_dup_file_rmap(folio, page, 1, dst_vma, PGTABLE_LEVEL_PTE);
>  }
>
>  /**
> @@ -632,7 +624,7 @@ static inline void folio_dup_file_rmap_pmd(struct folio *folio,
>  		struct page *page, struct vm_area_struct *dst_vma)
>  {
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> -	__folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, dst_vma, RMAP_LEVEL_PTE);
> +	__folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, dst_vma, PGTABLE_LEVEL_PTE);
>  #else
>  	WARN_ON_ONCE(true);
>  #endif
> @@ -640,7 +632,7 @@ static inline void folio_dup_file_rmap_pmd(struct folio *folio,
>
>  static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
>  		struct page *page, int nr_pages, struct vm_area_struct *dst_vma,
> -		struct vm_area_struct *src_vma, enum rmap_level level)
> +		struct vm_area_struct *src_vma, enum pgtable_level level)
>  {
>  	const int orig_nr_pages = nr_pages;
>  	bool maybe_pinned;
> @@ -665,7 +657,7 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
>  	 * copying if the folio maybe pinned.
>  	 */
>  	switch (level) {
> -	case RMAP_LEVEL_PTE:
> +	case PGTABLE_LEVEL_PTE:
>  		if (unlikely(maybe_pinned)) {
>  			for (i = 0; i < nr_pages; i++)
>  				if (PageAnonExclusive(page + i))
> @@ -687,8 +679,8 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
>  		} while (page++, --nr_pages > 0);
>  		folio_add_large_mapcount(folio, orig_nr_pages, dst_vma);
>  		break;
> -	case RMAP_LEVEL_PMD:
> -	case RMAP_LEVEL_PUD:
> +	case PGTABLE_LEVEL_PMD:
> +	case PGTABLE_LEVEL_PUD:
>  		if (PageAnonExclusive(page)) {
>  			if (unlikely(maybe_pinned))
>  				return -EBUSY;
> @@ -697,6 +689,8 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
>  		atomic_inc(&folio->_entire_mapcount);
>  		folio_inc_large_mapcount(folio, dst_vma);
>  		break;
> +	default:
> +		BUILD_BUG();
>  	}
>  	return 0;
>  }
> @@ -730,7 +724,7 @@ static inline int folio_try_dup_anon_rmap_ptes(struct folio *folio,
>  		struct vm_area_struct *src_vma)
>  {
>  	return __folio_try_dup_anon_rmap(folio, page, nr_pages, dst_vma,
> -					 src_vma, RMAP_LEVEL_PTE);
> +					 src_vma, PGTABLE_LEVEL_PTE);
>  }
>
>  static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio,
> @@ -738,7 +732,7 @@ static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio,
>  		struct vm_area_struct *src_vma)
>  {
>  	return __folio_try_dup_anon_rmap(folio, page, 1, dst_vma, src_vma,
> -					 RMAP_LEVEL_PTE);
> +					 PGTABLE_LEVEL_PTE);
>  }
>
>  /**
> @@ -770,7 +764,7 @@ static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio,
>  {
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>  	return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, dst_vma,
> -					 src_vma, RMAP_LEVEL_PMD);
> +					 src_vma, PGTABLE_LEVEL_PMD);
>  #else
>  	WARN_ON_ONCE(true);
>  	return -EBUSY;
> @@ -778,7 +772,7 @@ static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio,
>  }
>
>  static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,
> -		struct page *page, int nr_pages, enum rmap_level level)
> +		struct page *page, int nr_pages, enum pgtable_level level)
>  {
>  	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
>  	VM_WARN_ON_FOLIO(!PageAnonExclusive(page), folio);
> @@ -873,7 +867,7 @@ static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,
>  static inline int folio_try_share_anon_rmap_pte(struct folio *folio,
>  		struct page *page)
>  {
> -	return __folio_try_share_anon_rmap(folio, page, 1, RMAP_LEVEL_PTE);
> +	return __folio_try_share_anon_rmap(folio, page, 1, PGTABLE_LEVEL_PTE);
>  }
>
>  /**
> @@ -904,7 +898,7 @@ static inline int folio_try_share_anon_rmap_pmd(struct folio *folio,
>  {
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>  	return __folio_try_share_anon_rmap(folio, page, HPAGE_PMD_NR,
> -					   RMAP_LEVEL_PMD);
> +					   PGTABLE_LEVEL_PMD);
>  #else
>  	WARN_ON_ONCE(true);
>  	return -EBUSY;
> diff --git a/mm/rmap.c b/mm/rmap.c
> index 84a8d8b02ef77..0e9c4041f8687 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1265,7 +1265,7 @@ static void __folio_mod_stat(struct folio *folio, int nr, int nr_pmdmapped)
>
>  static __always_inline void __folio_add_rmap(struct folio *folio,
>  		struct page *page, int nr_pages, struct vm_area_struct *vma,
> -		enum rmap_level level)
> +		enum pgtable_level level)
>  {
>  	atomic_t *mapped = &folio->_nr_pages_mapped;
>  	const int orig_nr_pages = nr_pages;
> @@ -1274,7 +1274,7 @@ static __always_inline void __folio_add_rmap(struct folio *folio,
>  	__folio_rmap_sanity_checks(folio, page, nr_pages, level);
>
>  	switch (level) {
> -	case RMAP_LEVEL_PTE:
> +	case PGTABLE_LEVEL_PTE:
>  		if (!folio_test_large(folio)) {
>  			nr = atomic_inc_and_test(&folio->_mapcount);
>  			break;
> @@ -1300,11 +1300,11 @@ static __always_inline void __folio_add_rmap(struct folio *folio,
>
>  		folio_add_large_mapcount(folio, orig_nr_pages, vma);
>  		break;
> -	case RMAP_LEVEL_PMD:
> -	case RMAP_LEVEL_PUD:
> +	case PGTABLE_LEVEL_PMD:
> +	case PGTABLE_LEVEL_PUD:
>  		first = atomic_inc_and_test(&folio->_entire_mapcount);
>  		if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
> -			if (level == RMAP_LEVEL_PMD && first)
> +			if (level == PGTABLE_LEVEL_PMD && first)
>  				nr_pmdmapped = folio_large_nr_pages(folio);
>  			nr = folio_inc_return_large_mapcount(folio, vma);
>  			if (nr == 1)
> @@ -1323,7 +1323,7 @@ static __always_inline void __folio_add_rmap(struct folio *folio,
>  				 * We only track PMD mappings of PMD-sized
>  				 * folios separately.
>  				 */
> -				if (level == RMAP_LEVEL_PMD)
> +				if (level == PGTABLE_LEVEL_PMD)
>  					nr_pmdmapped = nr_pages;
>  				nr = nr_pages - (nr & FOLIO_PAGES_MAPPED);
>  				/* Raced ahead of a remove and another add? */
> @@ -1336,6 +1336,8 @@ static __always_inline void __folio_add_rmap(struct folio *folio,
>  		}
>  		folio_inc_large_mapcount(folio, vma);
>  		break;
> +	default:
> +		BUILD_BUG();
>  	}
>  	__folio_mod_stat(folio, nr, nr_pmdmapped);
>  }
> @@ -1427,7 +1429,7 @@ static void __page_check_anon_rmap(const struct folio *folio,
>
>  static __always_inline void __folio_add_anon_rmap(struct folio *folio,
>  		struct page *page, int nr_pages, struct vm_area_struct *vma,
> -		unsigned long address, rmap_t flags, enum rmap_level level)
> +		unsigned long address, rmap_t flags, enum pgtable_level level)
>  {
>  	int i;
>
> @@ -1440,20 +1442,22 @@ static __always_inline void __folio_add_anon_rmap(struct folio *folio,
>
>  	if (flags & RMAP_EXCLUSIVE) {
>  		switch (level) {
> -		case RMAP_LEVEL_PTE:
> +		case PGTABLE_LEVEL_PTE:
>  			for (i = 0; i < nr_pages; i++)
>  				SetPageAnonExclusive(page + i);
>  			break;
> -		case RMAP_LEVEL_PMD:
> +		case PGTABLE_LEVEL_PMD:
>  			SetPageAnonExclusive(page);
>  			break;
> -		case RMAP_LEVEL_PUD:
> +		case PGTABLE_LEVEL_PUD:
>  			/*
>  			 * Keep the compiler happy, we don't support anonymous
>  			 * PUD mappings.
>  			 */
>  			WARN_ON_ONCE(1);
>  			break;
> +		default:
> +			BUILD_BUG();
>  		}
>  	}
>
> @@ -1507,7 +1511,7 @@ void folio_add_anon_rmap_ptes(struct folio *folio, struct page *page,
>  		rmap_t flags)
>  {
>  	__folio_add_anon_rmap(folio, page, nr_pages, vma, address, flags,
> -			      RMAP_LEVEL_PTE);
> +			      PGTABLE_LEVEL_PTE);
>  }
>
>  /**
> @@ -1528,7 +1532,7 @@ void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page,
>  {
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>  	__folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags,
> -			      RMAP_LEVEL_PMD);
> +			      PGTABLE_LEVEL_PMD);
>  #else
>  	WARN_ON_ONCE(true);
>  #endif
> @@ -1609,7 +1613,7 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
>
>  static __always_inline void __folio_add_file_rmap(struct folio *folio,
>  		struct page *page, int nr_pages, struct vm_area_struct *vma,
> -		enum rmap_level level)
> +		enum pgtable_level level)
>  {
>  	VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);
>
> @@ -1634,7 +1638,7 @@ static __always_inline void __folio_add_file_rmap(struct folio *folio,
>  void folio_add_file_rmap_ptes(struct folio *folio, struct page *page,
>  		int nr_pages, struct vm_area_struct *vma)
>  {
> -	__folio_add_file_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE);
> +	__folio_add_file_rmap(folio, page, nr_pages, vma, PGTABLE_LEVEL_PTE);
>  }
>
>  /**
> @@ -1651,7 +1655,7 @@ void folio_add_file_rmap_pmd(struct folio *folio, struct page *page,
>  		struct vm_area_struct *vma)
>  {
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> -	__folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD);
> +	__folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD);
>  #else
>  	WARN_ON_ONCE(true);
>  #endif
> @@ -1672,7 +1676,7 @@ void folio_add_file_rmap_pud(struct folio *folio, struct page *page,
>  {
>  #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
>  	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
> -	__folio_add_file_rmap(folio, page, HPAGE_PUD_NR, vma, RMAP_LEVEL_PUD);
> +	__folio_add_file_rmap(folio, page, HPAGE_PUD_NR, vma, PGTABLE_LEVEL_PUD);
>  #else
>  	WARN_ON_ONCE(true);
>  #endif
> @@ -1680,7 +1684,7 @@ void folio_add_file_rmap_pud(struct folio *folio, struct page *page,
>
>  static __always_inline void __folio_remove_rmap(struct folio *folio,
>  		struct page *page, int nr_pages, struct vm_area_struct *vma,
> -		enum rmap_level level)
> +		enum pgtable_level level)
>  {
>  	atomic_t *mapped = &folio->_nr_pages_mapped;
>  	int last = 0, nr = 0, nr_pmdmapped = 0;
> @@ -1689,7 +1693,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
>  	__folio_rmap_sanity_checks(folio, page, nr_pages, level);
>
>  	switch (level) {
> -	case RMAP_LEVEL_PTE:
> +	case PGTABLE_LEVEL_PTE:
>  		if (!folio_test_large(folio)) {
>  			nr = atomic_add_negative(-1, &folio->_mapcount);
>  			break;
> @@ -1719,11 +1723,11 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
>
>  		partially_mapped = nr && atomic_read(mapped);
>  		break;
> -	case RMAP_LEVEL_PMD:
> -	case RMAP_LEVEL_PUD:
> +	case PGTABLE_LEVEL_PMD:
> +	case PGTABLE_LEVEL_PUD:
>  		if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
>  			last = atomic_add_negative(-1, &folio->_entire_mapcount);
> -			if (level == RMAP_LEVEL_PMD && last)
> +			if (level == PGTABLE_LEVEL_PMD && last)
>  				nr_pmdmapped = folio_large_nr_pages(folio);
>  			nr = folio_dec_return_large_mapcount(folio, vma);
>  			if (!nr) {
> @@ -1743,7 +1747,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
>  			nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped);
>  			if (likely(nr < ENTIRELY_MAPPED)) {
>  				nr_pages = folio_large_nr_pages(folio);
> -				if (level == RMAP_LEVEL_PMD)
> +				if (level == PGTABLE_LEVEL_PMD)
>  					nr_pmdmapped = nr_pages;
>  				nr = nr_pages - (nr & FOLIO_PAGES_MAPPED);
>  				/* Raced ahead of another remove and an add? */
> @@ -1757,6 +1761,8 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
>
>  		partially_mapped = nr && nr < nr_pmdmapped;
>  		break;
> +	default:
> +		BUILD_BUG();
>  	}
>
>  	/*
> @@ -1796,7 +1802,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
>  void folio_remove_rmap_ptes(struct folio *folio, struct page *page,
>  		int nr_pages, struct vm_area_struct *vma)
>  {
> -	__folio_remove_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE);
> +	__folio_remove_rmap(folio, page, nr_pages, vma, PGTABLE_LEVEL_PTE);
>  }
>
>  /**
> @@ -1813,7 +1819,7 @@ void folio_remove_rmap_pmd(struct folio *folio, struct page *page,
>  		struct vm_area_struct *vma)
>  {
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> -	__folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD);
> +	__folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD);
>  #else
>  	WARN_ON_ONCE(true);
>  #endif
> @@ -1834,7 +1840,7 @@ void folio_remove_rmap_pud(struct folio *folio, struct page *page,
>  {
>  #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
>  	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
> -	__folio_remove_rmap(folio, page, HPAGE_PUD_NR, vma, RMAP_LEVEL_PUD);
> +	__folio_remove_rmap(folio, page, HPAGE_PUD_NR, vma, PGTABLE_LEVEL_PUD);
>  #else
>  	WARN_ON_ONCE(true);
>  #endif
> --
> 2.50.1
>
