linux-kernel.vger.kernel.org archive mirror
* [PATCH 1/2] mm/gup: Remove dead pgmap refcounting code
@ 2025-09-02  5:14 Alistair Popple
  2025-09-02  5:14 ` [PATCH 2/2] mm/memremap: Remove unused get_dev_pagemap() parameter Alistair Popple
                   ` (3 more replies)
  0 siblings, 4 replies; 10+ messages in thread
From: Alistair Popple @ 2025-09-02  5:14 UTC (permalink / raw)
  To: linux-mm, akpm
  Cc: david, osalvador, jgg, jhubbard, peterx, linux-kernel,
	dan.j.williams, Alistair Popple

Prior to aed877c2b425 ("device/dax: properly refcount device dax pages
when mapping") ZONE_DEVICE pages were not fully reference counted when
mapped into user page tables. Instead GUP would take a reference on the
associated pgmap to ensure the results of pfn_to_page() remained valid.

This is no longer required and most of the code was removed by
fd2825b0760a ("mm/gup: remove pXX_devmap usage from get_user_pages()").
Finish cleaning this up by removing the dead calls to put_dev_pagemap()
and the temporary context struct.
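
For reference, the now-dead pattern looked roughly like this (a simplified
sketch of the historical slow-path code, not a verbatim quote):

	struct follow_page_context ctx = { NULL };

	/* follow_page_pte() used to cache a pgmap reference ... */
	if (pte_devmap(pte))
		ctx.pgmap = get_dev_pagemap(pte_pfn(pte), ctx.pgmap);

	/* ... which __get_user_pages() dropped on the way out: */
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);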

Signed-off-by: Alistair Popple <apopple@nvidia.com>
---
 mm/gup.c | 67 ++++++++++++++++++++++----------------------------------
 1 file changed, 26 insertions(+), 41 deletions(-)

diff --git a/mm/gup.c b/mm/gup.c
index adffe663594dc..be5791a43c735 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -28,11 +28,6 @@
 #include "internal.h"
 #include "swap.h"
 
-struct follow_page_context {
-	struct dev_pagemap *pgmap;
-	unsigned int page_mask;
-};
-
 static inline void sanity_check_pinned_pages(struct page **pages,
 					     unsigned long npages)
 {
@@ -661,7 +656,7 @@ static inline bool can_follow_write_pud(pud_t pud, struct page *page,
 
 static struct page *follow_huge_pud(struct vm_area_struct *vma,
 				    unsigned long addr, pud_t *pudp,
-				    int flags, struct follow_page_context *ctx)
+				    int flags, unsigned long *page_mask)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct page *page;
@@ -688,7 +683,7 @@ static struct page *follow_huge_pud(struct vm_area_struct *vma,
 	if (ret)
 		page = ERR_PTR(ret);
 	else
-		ctx->page_mask = HPAGE_PUD_NR - 1;
+		*page_mask = HPAGE_PUD_NR - 1;
 
 	return page;
 }
@@ -714,7 +709,7 @@ static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
 static struct page *follow_huge_pmd(struct vm_area_struct *vma,
 				    unsigned long addr, pmd_t *pmd,
 				    unsigned int flags,
-				    struct follow_page_context *ctx)
+				    unsigned long *page_mask)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pmd_t pmdval = *pmd;
@@ -751,7 +746,7 @@ static struct page *follow_huge_pmd(struct vm_area_struct *vma,
 #endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
 
 	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
-	ctx->page_mask = HPAGE_PMD_NR - 1;
+	*page_mask = HPAGE_PMD_NR - 1;
 
 	return page;
 }
@@ -759,7 +754,7 @@ static struct page *follow_huge_pmd(struct vm_area_struct *vma,
 #else  /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
 static struct page *follow_huge_pud(struct vm_area_struct *vma,
 				    unsigned long addr, pud_t *pudp,
-				    int flags, struct follow_page_context *ctx)
+				    int flags, unsigned long *page_mask)
 {
 	return NULL;
 }
@@ -767,7 +762,7 @@ static struct page *follow_huge_pud(struct vm_area_struct *vma,
 static struct page *follow_huge_pmd(struct vm_area_struct *vma,
 				    unsigned long addr, pmd_t *pmd,
 				    unsigned int flags,
-				    struct follow_page_context *ctx)
+				    unsigned long *page_mask)
 {
 	return NULL;
 }
@@ -813,8 +808,7 @@ static inline bool can_follow_write_pte(pte_t pte, struct page *page,
 }
 
 static struct page *follow_page_pte(struct vm_area_struct *vma,
-		unsigned long address, pmd_t *pmd, unsigned int flags,
-		struct dev_pagemap **pgmap)
+		unsigned long address, pmd_t *pmd, unsigned int flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct folio *folio;
@@ -912,7 +906,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 				    unsigned long address, pud_t *pudp,
 				    unsigned int flags,
-				    struct follow_page_context *ctx)
+				    unsigned long *page_mask)
 {
 	pmd_t *pmd, pmdval;
 	spinlock_t *ptl;
@@ -926,7 +920,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 	if (!pmd_present(pmdval))
 		return no_page_table(vma, flags, address);
 	if (likely(!pmd_leaf(pmdval)))
-		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
+		return follow_page_pte(vma, address, pmd, flags);
 
 	if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags))
 		return no_page_table(vma, flags, address);
@@ -939,16 +933,16 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 	}
 	if (unlikely(!pmd_leaf(pmdval))) {
 		spin_unlock(ptl);
-		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
+		return follow_page_pte(vma, address, pmd, flags);
 	}
 	if (pmd_trans_huge(pmdval) && (flags & FOLL_SPLIT_PMD)) {
 		spin_unlock(ptl);
 		split_huge_pmd(vma, pmd, address);
 		/* If pmd was left empty, stuff a page table in there quickly */
 		return pte_alloc(mm, pmd) ? ERR_PTR(-ENOMEM) :
-			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
+			follow_page_pte(vma, address, pmd, flags);
 	}
-	page = follow_huge_pmd(vma, address, pmd, flags, ctx);
+	page = follow_huge_pmd(vma, address, pmd, flags, page_mask);
 	spin_unlock(ptl);
 	return page;
 }
@@ -956,7 +950,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 static struct page *follow_pud_mask(struct vm_area_struct *vma,
 				    unsigned long address, p4d_t *p4dp,
 				    unsigned int flags,
-				    struct follow_page_context *ctx)
+				    unsigned long *page_mask)
 {
 	pud_t *pudp, pud;
 	spinlock_t *ptl;
@@ -969,7 +963,7 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
 		return no_page_table(vma, flags, address);
 	if (pud_leaf(pud)) {
 		ptl = pud_lock(mm, pudp);
-		page = follow_huge_pud(vma, address, pudp, flags, ctx);
+		page = follow_huge_pud(vma, address, pudp, flags, page_mask);
 		spin_unlock(ptl);
 		if (page)
 			return page;
@@ -978,13 +972,13 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
 	if (unlikely(pud_bad(pud)))
 		return no_page_table(vma, flags, address);
 
-	return follow_pmd_mask(vma, address, pudp, flags, ctx);
+	return follow_pmd_mask(vma, address, pudp, flags, page_mask);
 }
 
 static struct page *follow_p4d_mask(struct vm_area_struct *vma,
 				    unsigned long address, pgd_t *pgdp,
 				    unsigned int flags,
-				    struct follow_page_context *ctx)
+				    unsigned long *page_mask)
 {
 	p4d_t *p4dp, p4d;
 
@@ -995,7 +989,7 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
 	if (!p4d_present(p4d) || p4d_bad(p4d))
 		return no_page_table(vma, flags, address);
 
-	return follow_pud_mask(vma, address, p4dp, flags, ctx);
+	return follow_pud_mask(vma, address, p4dp, flags, page_mask);
 }
 
 /**
@@ -1003,20 +997,16 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
  * @vma: vm_area_struct mapping @address
  * @address: virtual address to look up
  * @flags: flags modifying lookup behaviour
- * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
- *       pointer to output page_mask
+ * @page_mask: a pointer to output page_mask
  *
  * @flags can have FOLL_ flags set, defined in <linux/mm.h>
  *
- * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
- * the device's dev_pagemap metadata to avoid repeating expensive lookups.
- *
  * When getting an anonymous page and the caller has to trigger unsharing
  * of a shared anonymous page first, -EMLINK is returned. The caller should
  * trigger a fault with FAULT_FLAG_UNSHARE set. Note that unsharing is only
  * relevant with FOLL_PIN and !FOLL_WRITE.
  *
- * On output, the @ctx->page_mask is set according to the size of the page.
+ * On output, @page_mask is set according to the size of the page.
  *
  * Return: the mapped (struct page *), %NULL if no mapping exists, or
  * an error pointer if there is a mapping to something not represented
@@ -1024,7 +1014,7 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
  */
 static struct page *follow_page_mask(struct vm_area_struct *vma,
 			      unsigned long address, unsigned int flags,
-			      struct follow_page_context *ctx)
+			      unsigned long *page_mask)
 {
 	pgd_t *pgd;
 	struct mm_struct *mm = vma->vm_mm;
@@ -1032,13 +1022,13 @@ static struct page *follow_page_mask(struct vm_area_struct *vma,
 
 	vma_pgtable_walk_begin(vma);
 
-	ctx->page_mask = 0;
+	*page_mask = 0;
 	pgd = pgd_offset(mm, address);
 
 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
 		page = no_page_table(vma, flags, address);
 	else
-		page = follow_p4d_mask(vma, address, pgd, flags, ctx);
+		page = follow_p4d_mask(vma, address, pgd, flags, page_mask);
 
 	vma_pgtable_walk_end(vma);
 
@@ -1376,7 +1366,7 @@ static long __get_user_pages(struct mm_struct *mm,
 {
 	long ret = 0, i = 0;
 	struct vm_area_struct *vma = NULL;
-	struct follow_page_context ctx = { NULL };
+	unsigned long page_mask = 0;
 
 	if (!nr_pages)
 		return 0;
@@ -1418,7 +1408,7 @@ static long __get_user_pages(struct mm_struct *mm,
 						pages ? &page : NULL);
 				if (ret)
 					goto out;
-				ctx.page_mask = 0;
+				page_mask = 0;
 				goto next_page;
 			}
 
@@ -1441,7 +1431,7 @@ static long __get_user_pages(struct mm_struct *mm,
 		}
 		cond_resched();
 
-		page = follow_page_mask(vma, start, gup_flags, &ctx);
+		page = follow_page_mask(vma, start, gup_flags, &page_mask);
 		if (!page || PTR_ERR(page) == -EMLINK) {
 			ret = faultin_page(vma, start, gup_flags,
 					   PTR_ERR(page) == -EMLINK, locked);
@@ -1474,7 +1464,7 @@ static long __get_user_pages(struct mm_struct *mm,
 			goto out;
 		}
 next_page:
-		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
+		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
 		if (page_increm > nr_pages)
 			page_increm = nr_pages;
 
@@ -1524,8 +1514,6 @@ static long __get_user_pages(struct mm_struct *mm,
 		nr_pages -= page_increm;
 	} while (nr_pages);
 out:
-	if (ctx.pgmap)
-		put_dev_pagemap(ctx.pgmap);
 	return i ? i : ret;
 }
 
@@ -2853,7 +2841,6 @@ static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
 		unsigned long end, unsigned int flags, struct page **pages,
 		int *nr)
 {
-	struct dev_pagemap *pgmap = NULL;
 	int ret = 0;
 	pte_t *ptep, *ptem;
 
@@ -2926,8 +2913,6 @@ static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
 	ret = 1;
 
 pte_unmap:
-	if (pgmap)
-		put_dev_pagemap(pgmap);
 	pte_unmap(ptem);
 	return ret;
 }
-- 
2.47.2


^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH 2/2] mm/memremap: Remove unused get_dev_pagemap() parameter
  2025-09-02  5:14 [PATCH 1/2] mm/gup: Remove dead pgmap refcounting code Alistair Popple
@ 2025-09-02  5:14 ` Alistair Popple
  2025-09-02  7:47   ` David Hildenbrand
                     ` (3 more replies)
  2025-09-02  7:46 ` [PATCH 1/2] mm/gup: Remove dead pgmap refcounting code David Hildenbrand
                   ` (2 subsequent siblings)
  3 siblings, 4 replies; 10+ messages in thread
From: Alistair Popple @ 2025-09-02  5:14 UTC (permalink / raw)
  To: linux-mm, akpm
  Cc: david, osalvador, jgg, jhubbard, peterx, linux-kernel,
	dan.j.williams, Alistair Popple

GUP no longer uses get_dev_pagemap(). As it was the only user of the
get_dev_pagemap() pgmap caching feature it can be removed.
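
For illustration, the caching behaviour being dropped let a caller hand back a
previously returned pgmap so it could be reused without a new lookup when it
still covered the pfn. A rough sketch of old versus new usage (simplified, not
actual call sites):

	/* old: reuse @pgmap if it still covers @pfn, else look up a new one */
	pgmap = get_dev_pagemap(pfn, pgmap);

	/* new: plain lookup; the caller drops the reference when done */
	pgmap = get_dev_pagemap(pfn);
	if (pgmap)
		put_dev_pagemap(pgmap);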

Signed-off-by: Alistair Popple <apopple@nvidia.com>
---
 include/linux/memremap.h |  6 ++----
 mm/memory_hotplug.c      |  2 +-
 mm/memremap.c            | 22 ++++------------------
 3 files changed, 7 insertions(+), 23 deletions(-)

diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 4aa151914eabb..5cfc2ae6c98bd 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -204,8 +204,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid);
 void memunmap_pages(struct dev_pagemap *pgmap);
 void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
 void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
-struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
-		struct dev_pagemap *pgmap);
+struct dev_pagemap *get_dev_pagemap(unsigned long pfn);
 bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
 
 unsigned long memremap_compat_align(void);
@@ -227,8 +226,7 @@ static inline void devm_memunmap_pages(struct device *dev,
 {
 }
 
-static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
-		struct dev_pagemap *pgmap)
+static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn)
 {
 	return NULL;
 }
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 1f15af712bc34..14e98c89f5b4d 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -375,7 +375,7 @@ struct page *pfn_to_online_page(unsigned long pfn)
 	 * the section may be 'offline' but 'valid'. Only
 	 * get_dev_pagemap() can determine sub-section online status.
 	 */
-	pgmap = get_dev_pagemap(pfn, NULL);
+	pgmap = get_dev_pagemap(pfn);
 	put_dev_pagemap(pgmap);
 
 	/* The presence of a pgmap indicates ZONE_DEVICE offline pfn */
diff --git a/mm/memremap.c b/mm/memremap.c
index b0ce0d8254bd8..3b4717cf3a6b2 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -153,14 +153,14 @@ static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
 				"altmap not supported for multiple ranges\n"))
 		return -EINVAL;
 
-	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
+	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start));
 	if (conflict_pgmap) {
 		WARN(1, "Conflicting mapping in same section\n");
 		put_dev_pagemap(conflict_pgmap);
 		return -ENOMEM;
 	}
 
-	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
+	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end));
 	if (conflict_pgmap) {
 		WARN(1, "Conflicting mapping in same section\n");
 		put_dev_pagemap(conflict_pgmap);
@@ -394,26 +394,12 @@ EXPORT_SYMBOL_GPL(devm_memunmap_pages);
 /**
  * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
  * @pfn: page frame number to lookup page_map
- * @pgmap: optional known pgmap that already has a reference
- *
- * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
- * is non-NULL but does not cover @pfn the reference to it will be released.
  */
-struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
-		struct dev_pagemap *pgmap)
+struct dev_pagemap *get_dev_pagemap(unsigned long pfn)
 {
+	struct dev_pagemap *pgmap;
 	resource_size_t phys = PFN_PHYS(pfn);
 
-	/*
-	 * In the cached case we're already holding a live reference.
-	 */
-	if (pgmap) {
-		if (phys >= pgmap->range.start && phys <= pgmap->range.end)
-			return pgmap;
-		put_dev_pagemap(pgmap);
-	}
-
-	/* fall back to slow path lookup */
 	rcu_read_lock();
 	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
 	if (pgmap && !percpu_ref_tryget_live_rcu(&pgmap->ref))
-- 
2.47.2


^ permalink raw reply related	[flat|nested] 10+ messages in thread

* Re: [PATCH 1/2] mm/gup: Remove dead pgmap refcounting code
  2025-09-02  5:14 [PATCH 1/2] mm/gup: Remove dead pgmap refcounting code Alistair Popple
  2025-09-02  5:14 ` [PATCH 2/2] mm/memremap: Remove unused get_dev_pagemap() parameter Alistair Popple
@ 2025-09-02  7:46 ` David Hildenbrand
  2025-09-02 22:52   ` Alistair Popple
  2025-09-02 12:06 ` Jason Gunthorpe
  2025-09-03  1:17 ` dan.j.williams
  3 siblings, 1 reply; 10+ messages in thread
From: David Hildenbrand @ 2025-09-02  7:46 UTC (permalink / raw)
  To: Alistair Popple, linux-mm, akpm
  Cc: osalvador, jgg, jhubbard, peterx, linux-kernel, dan.j.williams

On 02.09.25 07:14, Alistair Popple wrote:
> Prior to aed877c2b425 ("device/dax: properly refcount device dax pages

"to commit aed877c2b425" ...

> when mapping") ZONE_DEVICE pages were not fully reference counted when
> mapped into user page tables. Instead GUP would take a reference on the
> associated pgmap to ensure the results of pfn_to_page() remained valid.
> 
> This is no longer required and most of the code was removed by
> fd2825b0760a ("mm/gup: remove pXX_devmap usage from get_user_pages()").

"by commit fd2825b0760a"

Otherwise checkpatch complains.

> Finish cleaning this up by removing the dead calls to put_dev_pagemap()
> and the temporary context struct.

[...]

>   {
> @@ -661,7 +656,7 @@ static inline bool can_follow_write_pud(pud_t pud, struct page *page,
>   
>   static struct page *follow_huge_pud(struct vm_area_struct *vma,
>   				    unsigned long addr, pud_t *pudp,
> -				    int flags, struct follow_page_context *ctx)
> +				    int flags, unsigned long *page_mask)
>   {
>   	struct mm_struct *mm = vma->vm_mm;
>   	struct page *page;
> @@ -688,7 +683,7 @@ static struct page *follow_huge_pud(struct vm_area_struct *vma,
>   	if (ret)
>   		page = ERR_PTR(ret);
>   	else
> -		ctx->page_mask = HPAGE_PUD_NR - 1;
> +		*page_mask = HPAGE_PUD_NR - 1;

At some point we should remove that page_mask thingy as well and handle
it like gup-fast: let follow_* fill in the array directly.
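
(Rough sketch of the idea, purely illustrative: the follow_* helpers would
take the pages array and count like the gup-fast helpers do, e.g.

	for (; addr != end; addr += PAGE_SIZE)
		pages[(*nr)++] = page++;

rather than returning a single head page plus a page mask.)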


Lovely

Acked-by: David Hildenbrand <david@redhat.com>

-- 
Cheers

David / dhildenb


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 2/2] mm/memremap: Remove unused get_dev_pagemap() parameter
  2025-09-02  5:14 ` [PATCH 2/2] mm/memremap: Remove unused get_dev_pagemap() parameter Alistair Popple
@ 2025-09-02  7:47   ` David Hildenbrand
  2025-09-02 12:06   ` Jason Gunthorpe
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 10+ messages in thread
From: David Hildenbrand @ 2025-09-02  7:47 UTC (permalink / raw)
  To: Alistair Popple, linux-mm, akpm
  Cc: osalvador, jgg, jhubbard, peterx, linux-kernel, dan.j.williams

On 02.09.25 07:14, Alistair Popple wrote:
> GUP no longer uses get_dev_pagemap(). As it was the only user of the
> get_dev_pagemap() pgmap caching feature it can be removed.
> 
> Signed-off-by: Alistair Popple <apopple@nvidia.com>
> ---

Thanks!

Acked-by: David Hildenbrand <david@redhat.com>

-- 
Cheers

David / dhildenb


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 1/2] mm/gup: Remove dead pgmap refcounting code
  2025-09-02  5:14 [PATCH 1/2] mm/gup: Remove dead pgmap refcounting code Alistair Popple
  2025-09-02  5:14 ` [PATCH 2/2] mm/memremap: Remove unused get_dev_pagemap() parameter Alistair Popple
  2025-09-02  7:46 ` [PATCH 1/2] mm/gup: Remove dead pgmap refcounting code David Hildenbrand
@ 2025-09-02 12:06 ` Jason Gunthorpe
  2025-09-03  1:17 ` dan.j.williams
  3 siblings, 0 replies; 10+ messages in thread
From: Jason Gunthorpe @ 2025-09-02 12:06 UTC (permalink / raw)
  To: Alistair Popple
  Cc: linux-mm, akpm, david, osalvador, jhubbard, peterx, linux-kernel,
	dan.j.williams

On Tue, Sep 02, 2025 at 03:14:20PM +1000, Alistair Popple wrote:
> Prior to aed877c2b425 ("device/dax: properly refcount device dax pages
> when mapping") ZONE_DEVICE pages were not fully reference counted when
> mapped into user page tables. Instead GUP would take a reference on the
> associated pgmap to ensure the results of pfn_to_page() remained valid.
> 
> This is no longer required and most of the code was removed by
> fd2825b0760a ("mm/gup: remove pXX_devmap usage from get_user_pages()").
> Finish cleaning this up by removing the dead calls to put_dev_pagemap()
> and the temporary context struct.
> 
> Signed-off-by: Alistair Popple <apopple@nvidia.com>
> ---
>  mm/gup.c | 67 ++++++++++++++++++++++----------------------------------
>  1 file changed, 26 insertions(+), 41 deletions(-)

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>

Yay! This never made a lot of sense :\

Jason

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 2/2] mm/memremap: Remove unused get_dev_pagemap() parameter
  2025-09-02  5:14 ` [PATCH 2/2] mm/memremap: Remove unused get_dev_pagemap() parameter Alistair Popple
  2025-09-02  7:47   ` David Hildenbrand
@ 2025-09-02 12:06   ` Jason Gunthorpe
  2025-09-02 20:31   ` kernel test robot
  2025-09-03  1:20   ` dan.j.williams
  3 siblings, 0 replies; 10+ messages in thread
From: Jason Gunthorpe @ 2025-09-02 12:06 UTC (permalink / raw)
  To: Alistair Popple
  Cc: linux-mm, akpm, david, osalvador, jhubbard, peterx, linux-kernel,
	dan.j.williams

On Tue, Sep 02, 2025 at 03:14:21PM +1000, Alistair Popple wrote:
> GUP no longer uses get_dev_pagemap(). As it was the only user of the
> get_dev_pagemap() pgmap caching feature it can be removed.
> 
> Signed-off-by: Alistair Popple <apopple@nvidia.com>
> ---
>  include/linux/memremap.h |  6 ++----
>  mm/memory_hotplug.c      |  2 +-
>  mm/memremap.c            | 22 ++++------------------
>  3 files changed, 7 insertions(+), 23 deletions(-)

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>

Jason

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 2/2] mm/memremap: Remove unused get_dev_pagemap() parameter
  2025-09-02  5:14 ` [PATCH 2/2] mm/memremap: Remove unused get_dev_pagemap() parameter Alistair Popple
  2025-09-02  7:47   ` David Hildenbrand
  2025-09-02 12:06   ` Jason Gunthorpe
@ 2025-09-02 20:31   ` kernel test robot
  2025-09-03  1:20   ` dan.j.williams
  3 siblings, 0 replies; 10+ messages in thread
From: kernel test robot @ 2025-09-02 20:31 UTC (permalink / raw)
  To: Alistair Popple, linux-mm, akpm
  Cc: oe-kbuild-all, david, osalvador, jgg, jhubbard, peterx,
	linux-kernel, dan.j.williams, Alistair Popple

Hi Alistair,

kernel test robot noticed the following build errors:

[auto build test ERROR on akpm-mm/mm-everything]

url:    https://github.com/intel-lab-lkp/linux/commits/Alistair-Popple/mm-memremap-Remove-unused-get_dev_pagemap-parameter/20250902-131811
base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link:    https://lore.kernel.org/r/20250902051421.162498-2-apopple%40nvidia.com
patch subject: [PATCH 2/2] mm/memremap: Remove unused get_dev_pagemap() parameter
config: i386-buildonly-randconfig-001-20250903 (https://download.01.org/0day-ci/archive/20250903/202509030434.fEPVFkG4-lkp@intel.com/config)
compiler: gcc-12 (Debian 12.2.0-14+deb12u1) 12.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250903/202509030434.fEPVFkG4-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202509030434.fEPVFkG4-lkp@intel.com/

All errors (new ones prefixed by >>):

   mm/memory-failure.c: In function 'memory_failure':
>> mm/memory-failure.c:2269:33: error: too many arguments to function 'get_dev_pagemap'
    2269 |                         pgmap = get_dev_pagemap(pfn, NULL);
         |                                 ^~~~~~~~~~~~~~~
   In file included from include/linux/mm.h:33,
                    from mm/memory-failure.c:40:
   include/linux/memremap.h:236:35: note: declared here
     236 | static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn)
         |                                   ^~~~~~~~~~~~~~~


vim +/get_dev_pagemap +2269 mm/memory-failure.c

1a3798dececa8c Jane Chu                2024-05-24  2218  
cd42f4a3b2b1c4 Tony Luck               2011-12-15  2219  /**
cd42f4a3b2b1c4 Tony Luck               2011-12-15  2220   * memory_failure - Handle memory failure of a page.
cd42f4a3b2b1c4 Tony Luck               2011-12-15  2221   * @pfn: Page Number of the corrupted page
cd42f4a3b2b1c4 Tony Luck               2011-12-15  2222   * @flags: fine tune action taken
cd42f4a3b2b1c4 Tony Luck               2011-12-15  2223   *
cd42f4a3b2b1c4 Tony Luck               2011-12-15  2224   * This function is called by the low level machine check code
cd42f4a3b2b1c4 Tony Luck               2011-12-15  2225   * of an architecture when it detects hardware memory corruption
cd42f4a3b2b1c4 Tony Luck               2011-12-15  2226   * of a page. It tries its best to recover, which includes
cd42f4a3b2b1c4 Tony Luck               2011-12-15  2227   * dropping pages, killing processes etc.
cd42f4a3b2b1c4 Tony Luck               2011-12-15  2228   *
cd42f4a3b2b1c4 Tony Luck               2011-12-15  2229   * The function is primarily of use for corruptions that
cd42f4a3b2b1c4 Tony Luck               2011-12-15  2230   * happen outside the current execution context (e.g. when
cd42f4a3b2b1c4 Tony Luck               2011-12-15  2231   * detected by a background scrubber)
cd42f4a3b2b1c4 Tony Luck               2011-12-15  2232   *
cd42f4a3b2b1c4 Tony Luck               2011-12-15  2233   * Must run in process context (e.g. a work queue) with interrupts
5885c6a62533cb Miaohe Lin              2023-07-11  2234   * enabled and no spinlocks held.
d1fe111fb62a1c luofei                  2022-03-22  2235   *
d2734f044f8483 Shuai Xue               2025-03-12  2236   * Return:
d2734f044f8483 Shuai Xue               2025-03-12  2237   *   0             - success,
d2734f044f8483 Shuai Xue               2025-03-12  2238   *   -ENXIO        - memory not managed by the kernel
d2734f044f8483 Shuai Xue               2025-03-12  2239   *   -EOPNOTSUPP   - hwpoison_filter() filtered the error event,
d2734f044f8483 Shuai Xue               2025-03-12  2240   *   -EHWPOISON    - the page was already poisoned, potentially
d2734f044f8483 Shuai Xue               2025-03-12  2241   *                   kill process,
d2734f044f8483 Shuai Xue               2025-03-12  2242   *   other negative values - failure.
cd42f4a3b2b1c4 Tony Luck               2011-12-15  2243   */
83b57531c58f41 Eric W. Biederman       2017-07-09  2244  int memory_failure(unsigned long pfn, int flags)
6a46079cf57a7f Andi Kleen              2009-09-16  2245  {
6a46079cf57a7f Andi Kleen              2009-09-16  2246  	struct page *p;
5dba5c356ab3bb Matthew Wilcox (Oracle  2024-04-12  2247) 	struct folio *folio;
6100e34b2526e1 Dan Williams            2018-07-13  2248  	struct dev_pagemap *pgmap;
171936ddaf97e6 Tony Luck               2021-06-24  2249  	int res = 0;
524fca1e7356f8 Naoya Horiguchi         2013-02-22  2250  	unsigned long page_flags;
a8b2c2ce89d4e0 Oscar Salvador          2020-12-14  2251  	bool retry = true;
405ce051236cc6 Naoya Horiguchi         2022-04-21  2252  	int hugetlb = 0;
6a46079cf57a7f Andi Kleen              2009-09-16  2253  
6a46079cf57a7f Andi Kleen              2009-09-16  2254  	if (!sysctl_memory_failure_recovery)
83b57531c58f41 Eric W. Biederman       2017-07-09  2255  		panic("Memory failure on page %lx", pfn);
6a46079cf57a7f Andi Kleen              2009-09-16  2256  
03b122da74b22f Tony Luck               2021-10-26  2257  	mutex_lock(&mf_mutex);
03b122da74b22f Tony Luck               2021-10-26  2258  
67f22ba7750f94 zhenwei pi              2022-06-15  2259  	if (!(flags & MF_SW_SIMULATED))
67f22ba7750f94 zhenwei pi              2022-06-15  2260  		hw_memory_failure = true;
67f22ba7750f94 zhenwei pi              2022-06-15  2261  
96c804a6ae8c59 David Hildenbrand       2019-10-18  2262  	p = pfn_to_online_page(pfn);
96c804a6ae8c59 David Hildenbrand       2019-10-18  2263  	if (!p) {
03b122da74b22f Tony Luck               2021-10-26  2264  		res = arch_memory_failure(pfn, flags);
03b122da74b22f Tony Luck               2021-10-26  2265  		if (res == 0)
03b122da74b22f Tony Luck               2021-10-26  2266  			goto unlock_mutex;
03b122da74b22f Tony Luck               2021-10-26  2267  
96c804a6ae8c59 David Hildenbrand       2019-10-18  2268  		if (pfn_valid(pfn)) {
96c804a6ae8c59 David Hildenbrand       2019-10-18 @2269  			pgmap = get_dev_pagemap(pfn, NULL);
d51b68469bc780 Miaohe Lin              2023-07-01  2270  			put_ref_page(pfn, flags);
03b122da74b22f Tony Luck               2021-10-26  2271  			if (pgmap) {
03b122da74b22f Tony Luck               2021-10-26  2272  				res = memory_failure_dev_pagemap(pfn, flags,
96c804a6ae8c59 David Hildenbrand       2019-10-18  2273  								 pgmap);
03b122da74b22f Tony Luck               2021-10-26  2274  				goto unlock_mutex;
03b122da74b22f Tony Luck               2021-10-26  2275  			}
96c804a6ae8c59 David Hildenbrand       2019-10-18  2276  		}
96f96763de26d6 Kefeng Wang             2022-07-26  2277  		pr_err("%#lx: memory outside kernel control\n", pfn);
03b122da74b22f Tony Luck               2021-10-26  2278  		res = -ENXIO;
03b122da74b22f Tony Luck               2021-10-26  2279  		goto unlock_mutex;
6a46079cf57a7f Andi Kleen              2009-09-16  2280  	}
6a46079cf57a7f Andi Kleen              2009-09-16  2281  
a8b2c2ce89d4e0 Oscar Salvador          2020-12-14  2282  try_again:
405ce051236cc6 Naoya Horiguchi         2022-04-21  2283  	res = try_memory_failure_hugetlb(pfn, flags, &hugetlb);
405ce051236cc6 Naoya Horiguchi         2022-04-21  2284  	if (hugetlb)
171936ddaf97e6 Tony Luck               2021-06-24  2285  		goto unlock_mutex;
171936ddaf97e6 Tony Luck               2021-06-24  2286  
6a46079cf57a7f Andi Kleen              2009-09-16  2287  	if (TestSetPageHWPoison(p)) {
47af12bae17f99 Aili Yao                2021-06-24  2288  		res = -EHWPOISON;
a3f5d80ea401ac Naoya Horiguchi         2021-06-28  2289  		if (flags & MF_ACTION_REQUIRED)
a3f5d80ea401ac Naoya Horiguchi         2021-06-28  2290  			res = kill_accessing_process(current, pfn, flags);
f361e2462e8ccc Naoya Horiguchi         2022-04-28  2291  		if (flags & MF_COUNT_INCREASED)
f361e2462e8ccc Naoya Horiguchi         2022-04-28  2292  			put_page(p);
b8b9488d50b715 Jane Chu                2024-05-24  2293  		action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED);
171936ddaf97e6 Tony Luck               2021-06-24  2294  		goto unlock_mutex;
6a46079cf57a7f Andi Kleen              2009-09-16  2295  	}
6a46079cf57a7f Andi Kleen              2009-09-16  2296  
6a46079cf57a7f Andi Kleen              2009-09-16  2297  	/*
6a46079cf57a7f Andi Kleen              2009-09-16  2298  	 * We need/can do nothing about count=0 pages.
6a46079cf57a7f Andi Kleen              2009-09-16  2299  	 * 1) it's a free page, and therefore in safe hand:
9cf2819159d5a3 Miaohe Lin              2022-08-30  2300  	 *    check_new_page() will be the gate keeper.
761ad8d7c7b548 Naoya Horiguchi         2017-07-10  2301  	 * 2) it's part of a non-compound high order page.
6a46079cf57a7f Andi Kleen              2009-09-16  2302  	 *    Implies some kernel user: cannot stop them from
6a46079cf57a7f Andi Kleen              2009-09-16  2303  	 *    R/W the page; let's pray that the page has been
6a46079cf57a7f Andi Kleen              2009-09-16  2304  	 *    used and will be freed some time later.
6a46079cf57a7f Andi Kleen              2009-09-16  2305  	 * In fact it's dangerous to directly bump up page count from 0,
1c4c3b99c03d3e Jiang Biao              2018-08-21  2306  	 * that may make page_ref_freeze()/page_ref_unfreeze() mismatch.
6a46079cf57a7f Andi Kleen              2009-09-16  2307  	 */
0ed950d1f28142 Naoya Horiguchi         2021-06-28  2308  	if (!(flags & MF_COUNT_INCREASED)) {
0ed950d1f28142 Naoya Horiguchi         2021-06-28  2309  		res = get_hwpoison_page(p, flags);
0ed950d1f28142 Naoya Horiguchi         2021-06-28  2310  		if (!res) {
8d22ba1b74aa94 Wu Fengguang            2009-12-16  2311  			if (is_free_buddy_page(p)) {
a8b2c2ce89d4e0 Oscar Salvador          2020-12-14  2312  				if (take_page_off_buddy(p)) {
a8b2c2ce89d4e0 Oscar Salvador          2020-12-14  2313  					page_ref_inc(p);
a8b2c2ce89d4e0 Oscar Salvador          2020-12-14  2314  					res = MF_RECOVERED;
a8b2c2ce89d4e0 Oscar Salvador          2020-12-14  2315  				} else {
a8b2c2ce89d4e0 Oscar Salvador          2020-12-14  2316  					/* We lost the race, try again */
a8b2c2ce89d4e0 Oscar Salvador          2020-12-14  2317  					if (retry) {
a8b2c2ce89d4e0 Oscar Salvador          2020-12-14  2318  						ClearPageHWPoison(p);
a8b2c2ce89d4e0 Oscar Salvador          2020-12-14  2319  						retry = false;
a8b2c2ce89d4e0 Oscar Salvador          2020-12-14  2320  						goto try_again;
a8b2c2ce89d4e0 Oscar Salvador          2020-12-14  2321  					}
a8b2c2ce89d4e0 Oscar Salvador          2020-12-14  2322  					res = MF_FAILED;
a8b2c2ce89d4e0 Oscar Salvador          2020-12-14  2323  				}
b66d00dfebe79e Kefeng Wang             2022-10-21  2324  				res = action_result(pfn, MF_MSG_BUDDY, res);
8d22ba1b74aa94 Wu Fengguang            2009-12-16  2325  			} else {
b66d00dfebe79e Kefeng Wang             2022-10-21  2326  				res = action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
8d22ba1b74aa94 Wu Fengguang            2009-12-16  2327  			}
171936ddaf97e6 Tony Luck               2021-06-24  2328  			goto unlock_mutex;
0ed950d1f28142 Naoya Horiguchi         2021-06-28  2329  		} else if (res < 0) {
b8b9488d50b715 Jane Chu                2024-05-24  2330  			res = action_result(pfn, MF_MSG_GET_HWPOISON, MF_IGNORED);
0ed950d1f28142 Naoya Horiguchi         2021-06-28  2331  			goto unlock_mutex;
0ed950d1f28142 Naoya Horiguchi         2021-06-28  2332  		}
6a46079cf57a7f Andi Kleen              2009-09-16  2333  	}
6a46079cf57a7f Andi Kleen              2009-09-16  2334  
5dba5c356ab3bb Matthew Wilcox (Oracle  2024-04-12  2335) 	folio = page_folio(p);
9b0ab153d76972 Jane Chu                2024-05-24  2336  
9b0ab153d76972 Jane Chu                2024-05-24  2337  	/* filter pages that are protected from hwpoison test by users */
9b0ab153d76972 Jane Chu                2024-05-24  2338  	folio_lock(folio);
9b0ab153d76972 Jane Chu                2024-05-24  2339  	if (hwpoison_filter(p)) {
9b0ab153d76972 Jane Chu                2024-05-24  2340  		ClearPageHWPoison(p);
9b0ab153d76972 Jane Chu                2024-05-24  2341  		folio_unlock(folio);
9b0ab153d76972 Jane Chu                2024-05-24  2342  		folio_put(folio);
9b0ab153d76972 Jane Chu                2024-05-24  2343  		res = -EOPNOTSUPP;
9b0ab153d76972 Jane Chu                2024-05-24  2344  		goto unlock_mutex;
9b0ab153d76972 Jane Chu                2024-05-24  2345  	}
9b0ab153d76972 Jane Chu                2024-05-24  2346  	folio_unlock(folio);
9b0ab153d76972 Jane Chu                2024-05-24  2347  
5dba5c356ab3bb Matthew Wilcox (Oracle  2024-04-12  2348) 	if (folio_test_large(folio)) {
eac96c3efdb593 Yang Shi                2021-10-28  2349  		/*
eac96c3efdb593 Yang Shi                2021-10-28  2350  		 * The flag must be set after the refcount is bumped
eac96c3efdb593 Yang Shi                2021-10-28  2351  		 * otherwise it may race with THP split.
eac96c3efdb593 Yang Shi                2021-10-28  2352  		 * And the flag can't be set in get_hwpoison_page() since
eac96c3efdb593 Yang Shi                2021-10-28  2353  		 * it is called by soft offline too and it is just called
5885c6a62533cb Miaohe Lin              2023-07-11  2354  		 * for !MF_COUNT_INCREASED.  So here seems to be the best
eac96c3efdb593 Yang Shi                2021-10-28  2355  		 * place.
eac96c3efdb593 Yang Shi                2021-10-28  2356  		 *
eac96c3efdb593 Yang Shi                2021-10-28  2357  		 * Don't need care about the above error handling paths for
eac96c3efdb593 Yang Shi                2021-10-28  2358  		 * get_hwpoison_page() since they handle either free page
eac96c3efdb593 Yang Shi                2021-10-28  2359  		 * or unhandlable page.  The refcount is bumped iff the
eac96c3efdb593 Yang Shi                2021-10-28  2360  		 * page is a valid handlable page.
eac96c3efdb593 Yang Shi                2021-10-28  2361  		 */
5dba5c356ab3bb Matthew Wilcox (Oracle  2024-04-12  2362) 		folio_set_has_hwpoisoned(folio);
1a3798dececa8c Jane Chu                2024-05-24  2363  		if (try_to_split_thp_page(p, false) < 0) {
1a3798dececa8c Jane Chu                2024-05-24  2364  			res = -EHWPOISON;
1a3798dececa8c Jane Chu                2024-05-24  2365  			kill_procs_now(p, pfn, flags, folio);
1a3798dececa8c Jane Chu                2024-05-24  2366  			put_page(p);
1a3798dececa8c Jane Chu                2024-05-24  2367  			action_result(pfn, MF_MSG_UNSPLIT_THP, MF_FAILED);
171936ddaf97e6 Tony Luck               2021-06-24  2368  			goto unlock_mutex;
5d1fd5dc877bc1 Naoya Horiguchi         2020-10-15  2369  		}
415c64c1453aa2 Naoya Horiguchi         2015-06-24  2370  		VM_BUG_ON_PAGE(!page_count(p), p);
5dba5c356ab3bb Matthew Wilcox (Oracle  2024-04-12  2371) 		folio = page_folio(p);
415c64c1453aa2 Naoya Horiguchi         2015-06-24  2372  	}
415c64c1453aa2 Naoya Horiguchi         2015-06-24  2373  
e43c3afb367112 Wu Fengguang            2009-09-29  2374  	/*
e43c3afb367112 Wu Fengguang            2009-09-29  2375  	 * We ignore non-LRU pages for good reasons.
e43c3afb367112 Wu Fengguang            2009-09-29  2376  	 * - PG_locked is only well defined for LRU pages and a few others
48c935ad88f5be Kirill A. Shutemov      2016-01-15  2377  	 * - to avoid races with __SetPageLocked()
e43c3afb367112 Wu Fengguang            2009-09-29  2378  	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
e43c3afb367112 Wu Fengguang            2009-09-29  2379  	 * The check (unnecessarily) ignores LRU pages being isolated and
e43c3afb367112 Wu Fengguang            2009-09-29  2380  	 * walked by the page reclaim code, however that's not a big loss.
e43c3afb367112 Wu Fengguang            2009-09-29  2381  	 */
5dba5c356ab3bb Matthew Wilcox (Oracle  2024-04-12  2382) 	shake_folio(folio);
e43c3afb367112 Wu Fengguang            2009-09-29  2383  
5dba5c356ab3bb Matthew Wilcox (Oracle  2024-04-12  2384) 	folio_lock(folio);
847ce401df392b Wu Fengguang            2009-12-16  2385  
f37d4298aa7f8b Andi Kleen              2014-08-06  2386  	/*
75ee64b3c9a969 Miaohe Lin              2022-03-22  2387  	 * We're only intended to deal with the non-Compound page here.
8a78882dac1c8c Miaohe Lin              2024-07-08  2388  	 * The page cannot become compound pages again as folio has been
8a78882dac1c8c Miaohe Lin              2024-07-08  2389  	 * splited and extra refcnt is held.
f37d4298aa7f8b Andi Kleen              2014-08-06  2390  	 */
8a78882dac1c8c Miaohe Lin              2024-07-08  2391  	WARN_ON(folio_test_large(folio));
f37d4298aa7f8b Andi Kleen              2014-08-06  2392  
524fca1e7356f8 Naoya Horiguchi         2013-02-22  2393  	/*
524fca1e7356f8 Naoya Horiguchi         2013-02-22  2394  	 * We use page flags to determine what action should be taken, but
524fca1e7356f8 Naoya Horiguchi         2013-02-22  2395  	 * the flags can be modified by the error containment action.  One
524fca1e7356f8 Naoya Horiguchi         2013-02-22  2396  	 * example is an mlocked page, where PG_mlocked is cleared by
4d8f7418e8ba36 David Hildenbrand       2023-12-20  2397  	 * folio_remove_rmap_*() in try_to_unmap_one(). So to determine page
4d8f7418e8ba36 David Hildenbrand       2023-12-20  2398  	 * status correctly, we save a copy of the page flags at this time.
524fca1e7356f8 Naoya Horiguchi         2013-02-22  2399  	 */
378d05afc7b1bd Matthew Wilcox (Oracle  2025-08-05  2400) 	page_flags = folio->flags.f;
524fca1e7356f8 Naoya Horiguchi         2013-02-22  2401  
e8675d291ac007 yangerkun               2021-06-15  2402  	/*
5dba5c356ab3bb Matthew Wilcox (Oracle  2024-04-12  2403) 	 * __munlock_folio() may clear a writeback folio's LRU flag without
5dba5c356ab3bb Matthew Wilcox (Oracle  2024-04-12  2404) 	 * the folio lock. We need to wait for writeback completion for this
5dba5c356ab3bb Matthew Wilcox (Oracle  2024-04-12  2405) 	 * folio or it may trigger a vfs BUG while evicting inode.
e8675d291ac007 yangerkun               2021-06-15  2406  	 */
5dba5c356ab3bb Matthew Wilcox (Oracle  2024-04-12  2407) 	if (!folio_test_lru(folio) && !folio_test_writeback(folio))
0bc1f8b0682caa Chen Yucong             2014-07-02  2408  		goto identify_page_state;
0bc1f8b0682caa Chen Yucong             2014-07-02  2409  
6edd6cc66201e0 Naoya Horiguchi         2014-06-04  2410  	/*
6edd6cc66201e0 Naoya Horiguchi         2014-06-04  2411  	 * It's very difficult to mess with pages currently under IO
6edd6cc66201e0 Naoya Horiguchi         2014-06-04  2412  	 * and in many cases impossible, so we just avoid it here.
6edd6cc66201e0 Naoya Horiguchi         2014-06-04  2413  	 */
5dba5c356ab3bb Matthew Wilcox (Oracle  2024-04-12  2414) 	folio_wait_writeback(folio);
6a46079cf57a7f Andi Kleen              2009-09-16  2415  
6a46079cf57a7f Andi Kleen              2009-09-16  2416  	/*
6a46079cf57a7f Andi Kleen              2009-09-16  2417  	 * Now take care of user space mappings.
6ffcd825e7d041 Matthew Wilcox (Oracle  2022-06-28  2418) 	 * Abort on fail: __filemap_remove_folio() assumes unmapped page.
6a46079cf57a7f Andi Kleen              2009-09-16  2419  	 */
03468a0f52893b Matthew Wilcox (Oracle  2024-04-12  2420) 	if (!hwpoison_user_mappings(folio, p, pfn, flags)) {
b8b9488d50b715 Jane Chu                2024-05-24  2421  		res = action_result(pfn, MF_MSG_UNMAP_FAILED, MF_FAILED);
171936ddaf97e6 Tony Luck               2021-06-24  2422  		goto unlock_page;
1668bfd5be9d8a Wu Fengguang            2009-12-16  2423  	}
6a46079cf57a7f Andi Kleen              2009-09-16  2424  
6a46079cf57a7f Andi Kleen              2009-09-16  2425  	/*
6a46079cf57a7f Andi Kleen              2009-09-16  2426  	 * Torn down by someone else?
6a46079cf57a7f Andi Kleen              2009-09-16  2427  	 */
5dba5c356ab3bb Matthew Wilcox (Oracle  2024-04-12  2428) 	if (folio_test_lru(folio) && !folio_test_swapcache(folio) &&
5dba5c356ab3bb Matthew Wilcox (Oracle  2024-04-12  2429) 	    folio->mapping == NULL) {
b66d00dfebe79e Kefeng Wang             2022-10-21  2430  		res = action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
171936ddaf97e6 Tony Luck               2021-06-24  2431  		goto unlock_page;
6a46079cf57a7f Andi Kleen              2009-09-16  2432  	}
6a46079cf57a7f Andi Kleen              2009-09-16  2433  
0bc1f8b0682caa Chen Yucong             2014-07-02  2434  identify_page_state:
0348d2ebec9b00 Naoya Horiguchi         2017-07-10  2435  	res = identify_page_state(pfn, p, page_flags);
ea6d0630100b28 Naoya Horiguchi         2021-06-24  2436  	mutex_unlock(&mf_mutex);
ea6d0630100b28 Naoya Horiguchi         2021-06-24  2437  	return res;
171936ddaf97e6 Tony Luck               2021-06-24  2438  unlock_page:
5dba5c356ab3bb Matthew Wilcox (Oracle  2024-04-12  2439) 	folio_unlock(folio);
171936ddaf97e6 Tony Luck               2021-06-24  2440  unlock_mutex:
171936ddaf97e6 Tony Luck               2021-06-24  2441  	mutex_unlock(&mf_mutex);
6a46079cf57a7f Andi Kleen              2009-09-16  2442  	return res;
6a46079cf57a7f Andi Kleen              2009-09-16  2443  }
cd42f4a3b2b1c4 Tony Luck               2011-12-15  2444  EXPORT_SYMBOL_GPL(memory_failure);
847ce401df392b Wu Fengguang            2009-12-16  2445  

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 1/2] mm/gup: Remove dead pgmap refcounting code
  2025-09-02  7:46 ` [PATCH 1/2] mm/gup: Remove dead pgmap refcounting code David Hildenbrand
@ 2025-09-02 22:52   ` Alistair Popple
  0 siblings, 0 replies; 10+ messages in thread
From: Alistair Popple @ 2025-09-02 22:52 UTC (permalink / raw)
  To: David Hildenbrand
  Cc: linux-mm, akpm, osalvador, jgg, jhubbard, peterx, linux-kernel,
	dan.j.williams

On 2025-09-02 at 17:46 +1000, David Hildenbrand <david@redhat.com> wrote...
> On 02.09.25 07:14, Alistair Popple wrote:
> > Prior to aed877c2b425 ("device/dax: properly refcount device dax pages
> 
> "to commit aed877c2b425" ...
> 
> > when mapping") ZONE_DEVICE pages were not fully reference counted when
> > mapped into user page tables. Instead GUP would take a reference on the
> > associated pgmap to ensure the results of pfn_to_page() remained valid.
> > 
> > This is no longer required and most of the code was removed by
> > fd2825b0760a ("mm/gup: remove pXX_devmap usage from get_user_pages()").
> 
> "by commit fd2825b0760a"
> 
> Otherwise checkpatch complains.

Aha! Clearly I was tired ... I saw the checkpatch spew but couldn't figure out
what it was trying to tell me other than I was wrong :).

Anyway, the kernel test robot tells me I missed a call site, so I'll fix both
of these issues and respin. Thanks!
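
(Presumably the missed call site in memory-failure.c just needs the same
one-argument conversion, i.e. something along the lines of

	-			pgmap = get_dev_pagemap(pfn, NULL);
	+			pgmap = get_dev_pagemap(pfn);

which I'll fold into the respin.)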

> > Finish cleaning this up by removing the dead calls to put_dev_pagemap()
> > and the temporary context struct.
> 
> [...]
> 
> >   {
> > @@ -661,7 +656,7 @@ static inline bool can_follow_write_pud(pud_t pud, struct page *page,
> >   static struct page *follow_huge_pud(struct vm_area_struct *vma,
> >   				    unsigned long addr, pud_t *pudp,
> > -				    int flags, struct follow_page_context *ctx)
> > +				    int flags, unsigned long *page_mask)
> >   {
> >   	struct mm_struct *mm = vma->vm_mm;
> >   	struct page *page;
> > @@ -688,7 +683,7 @@ static struct page *follow_huge_pud(struct vm_area_struct *vma,
> >   	if (ret)
> >   		page = ERR_PTR(ret);
> >   	else
> > -		ctx->page_mask = HPAGE_PUD_NR - 1;
> > +		*page_mask = HPAGE_PUD_NR - 1;
> 
> At some point we should remove that page_mask thingy as well and handle it
> like gup-fast: let follow_* fill in the array directly.
> 
> 
> Lovely
> 
> Acked-by: David Hildenbrand <david@redhat.com>
> 
> -- 
> Cheers
> 
> David / dhildenb
> 

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 1/2] mm/gup: Remove dead pgmap refcounting code
  2025-09-02  5:14 [PATCH 1/2] mm/gup: Remove dead pgmap refcounting code Alistair Popple
                   ` (2 preceding siblings ...)
  2025-09-02 12:06 ` Jason Gunthorpe
@ 2025-09-03  1:17 ` dan.j.williams
  3 siblings, 0 replies; 10+ messages in thread
From: dan.j.williams @ 2025-09-03  1:17 UTC (permalink / raw)
  To: Alistair Popple, linux-mm, akpm
  Cc: david, osalvador, jgg, jhubbard, peterx, linux-kernel,
	dan.j.williams, Alistair Popple

Alistair Popple wrote:
> Prior to aed877c2b425 ("device/dax: properly refcount device dax pages
> when mapping") ZONE_DEVICE pages were not fully reference counted when
> mapped into user page tables. Instead GUP would take a reference on the
> associated pgmap to ensure the results of pfn_to_page() remained valid.
> 
> This is no longer required and most of the code was removed by
> fd2825b0760a ("mm/gup: remove pXX_devmap usage from get_user_pages()").
> Finish cleaning this up by removing the dead calls to put_dev_pagemap()
> and the temporary context struct.
> 
> Signed-off-by: Alistair Popple <apopple@nvidia.com>

Looks good, thanks for this work, Alistair.

Reviewed-by: Dan Williams <dan.j.williams@intel.com>

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 2/2] mm/memremap: Remove unused get_dev_pagemap() parameter
  2025-09-02  5:14 ` [PATCH 2/2] mm/memremap: Remove unused get_dev_pagemap() parameter Alistair Popple
                     ` (2 preceding siblings ...)
  2025-09-02 20:31   ` kernel test robot
@ 2025-09-03  1:20   ` dan.j.williams
  3 siblings, 0 replies; 10+ messages in thread
From: dan.j.williams @ 2025-09-03  1:20 UTC (permalink / raw)
  To: Alistair Popple, linux-mm, akpm
  Cc: david, osalvador, jgg, jhubbard, peterx, linux-kernel,
	dan.j.williams, Alistair Popple

Alistair Popple wrote:
> GUP no longer uses get_dev_pagemap(). As it was the only user of the
> get_dev_pagemap() pgmap caching feature it can be removed.
> 
> Signed-off-by: Alistair Popple <apopple@nvidia.com>

Looks good. After you fix up the missing conversion in memory-failure.c, you
can add:

Reviewed-by: Dan Williams <dan.j.williams@intel.com>

^ permalink raw reply	[flat|nested] 10+ messages in thread
