linux-mm.kvack.org archive mirror
* [PATCH] secretmem: Remove uses of struct page
@ 2025-06-13 19:47 Matthew Wilcox (Oracle)
  2025-06-15  7:10 ` Mike Rapoport
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: Matthew Wilcox (Oracle) @ 2025-06-13 19:47 UTC (permalink / raw)
  To: Andrew Morton, Mike Rapoport; +Cc: Matthew Wilcox (Oracle), linux-mm

Use filemap_lock_folio() instead of find_lock_page() to retrieve
a folio from the page cache.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/secretmem.c | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

diff --git a/mm/secretmem.c b/mm/secretmem.c
index 589b26c2d553..dfa8d8d10bcb 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -54,7 +54,6 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
 	pgoff_t offset = vmf->pgoff;
 	gfp_t gfp = vmf->gfp_mask;
 	unsigned long addr;
-	struct page *page;
 	struct folio *folio;
 	vm_fault_t ret;
 	int err;
@@ -65,16 +64,15 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
 	filemap_invalidate_lock_shared(mapping);
 
 retry:
-	page = find_lock_page(mapping, offset);
-	if (!page) {
+	folio = filemap_lock_folio(mapping, offset);
+	if (!folio) {
 		folio = folio_alloc(gfp | __GFP_ZERO, 0);
 		if (!folio) {
 			ret = VM_FAULT_OOM;
 			goto out;
 		}
 
-		page = &folio->page;
-		err = set_direct_map_invalid_noflush(page);
+		err = set_direct_map_invalid_noflush(folio_page(folio, 0));
 		if (err) {
 			folio_put(folio);
 			ret = vmf_error(err);
@@ -90,7 +88,7 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
 			 * already happened when we marked the page invalid
 			 * which guarantees that this call won't fail
 			 */
-			set_direct_map_default_noflush(page);
+			set_direct_map_default_noflush(folio_page(folio, 0));
 			if (err == -EEXIST)
 				goto retry;
 
@@ -98,11 +96,11 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
 			goto out;
 		}
 
-		addr = (unsigned long)page_address(page);
+		addr = (unsigned long)folio_address(folio);
 		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
 	}
 
-	vmf->page = page;
+	vmf->page = folio_file_page(folio, vmf->pgoff);
 	ret = VM_FAULT_LOCKED;
 
 out:
@@ -154,7 +152,7 @@ static int secretmem_migrate_folio(struct address_space *mapping,
 
 static void secretmem_free_folio(struct folio *folio)
 {
-	set_direct_map_default_noflush(&folio->page);
+	set_direct_map_default_noflush(folio_page(folio, 0));
 	folio_zero_segment(folio, 0, folio_size(folio));
 }
 
-- 
2.47.2




* Re: [PATCH] secretmem: Remove uses of struct page
  2025-06-13 19:47 [PATCH] secretmem: Remove uses of struct page Matthew Wilcox (Oracle)
@ 2025-06-15  7:10 ` Mike Rapoport
  2025-06-16 13:56 ` Lorenzo Stoakes
  2025-06-16 21:19 ` David Hildenbrand
  2 siblings, 0 replies; 4+ messages in thread
From: Mike Rapoport @ 2025-06-15  7:10 UTC (permalink / raw)
  To: Matthew Wilcox (Oracle); +Cc: Andrew Morton, linux-mm

On Fri, Jun 13, 2025 at 08:47:43PM +0100, Matthew Wilcox (Oracle) wrote:
> Use filemap_lock_folio() instead of find_lock_page() to retrieve
> a folio from the page cache.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>

> ---
>  mm/secretmem.c | 16 +++++++---------
>  1 file changed, 7 insertions(+), 9 deletions(-)
> 
> diff --git a/mm/secretmem.c b/mm/secretmem.c
> index 589b26c2d553..dfa8d8d10bcb 100644
> --- a/mm/secretmem.c
> +++ b/mm/secretmem.c
> @@ -54,7 +54,6 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
>  	pgoff_t offset = vmf->pgoff;
>  	gfp_t gfp = vmf->gfp_mask;
>  	unsigned long addr;
> -	struct page *page;
>  	struct folio *folio;
>  	vm_fault_t ret;
>  	int err;
> @@ -65,16 +64,15 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
>  	filemap_invalidate_lock_shared(mapping);
>  
>  retry:
> -	page = find_lock_page(mapping, offset);
> -	if (!page) {
> +	folio = filemap_lock_folio(mapping, offset);
> +	if (!folio) {
>  		folio = folio_alloc(gfp | __GFP_ZERO, 0);
>  		if (!folio) {
>  			ret = VM_FAULT_OOM;
>  			goto out;
>  		}
>  
> -		page = &folio->page;
> -		err = set_direct_map_invalid_noflush(page);
> +		err = set_direct_map_invalid_noflush(folio_page(folio, 0));
>  		if (err) {
>  			folio_put(folio);
>  			ret = vmf_error(err);
> @@ -90,7 +88,7 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
>  			 * already happened when we marked the page invalid
>  			 * which guarantees that this call won't fail
>  			 */
> -			set_direct_map_default_noflush(page);
> +			set_direct_map_default_noflush(folio_page(folio, 0));
>  			if (err == -EEXIST)
>  				goto retry;
>  
> @@ -98,11 +96,11 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
>  			goto out;
>  		}
>  
> -		addr = (unsigned long)page_address(page);
> +		addr = (unsigned long)folio_address(folio);
>  		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
>  	}
>  
> -	vmf->page = page;
> +	vmf->page = folio_file_page(folio, vmf->pgoff);
>  	ret = VM_FAULT_LOCKED;
>  
>  out:
> @@ -154,7 +152,7 @@ static int secretmem_migrate_folio(struct address_space *mapping,
>  
>  static void secretmem_free_folio(struct folio *folio)
>  {
> -	set_direct_map_default_noflush(&folio->page);
> +	set_direct_map_default_noflush(folio_page(folio, 0));
>  	folio_zero_segment(folio, 0, folio_size(folio));
>  }
>  
> -- 
> 2.47.2
> 

-- 
Sincerely yours,
Mike.



* Re: [PATCH] secretmem: Remove uses of struct page
  2025-06-13 19:47 [PATCH] secretmem: Remove uses of struct page Matthew Wilcox (Oracle)
  2025-06-15  7:10 ` Mike Rapoport
@ 2025-06-16 13:56 ` Lorenzo Stoakes
  2025-06-16 21:19 ` David Hildenbrand
  2 siblings, 0 replies; 4+ messages in thread
From: Lorenzo Stoakes @ 2025-06-16 13:56 UTC (permalink / raw)
  To: Matthew Wilcox (Oracle); +Cc: Andrew Morton, Mike Rapoport, linux-mm

Hi Matthew,

This patch causes a kernel splat when running the memfd_secret mm self-test.

This is because find_lock_page() returns NULL when the page isn't in the
page cache, whereas filemap_lock_folio() returns an ERR_PTR, so the !folio
check never fires for a missing folio and the fault handler ends up using
the error pointer.
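
Roughly, the difference in calling convention looks like this (just a
sketch, not the exact mm/filemap.c code):

	/* before: absence is reported as NULL */
	page = find_lock_page(mapping, offset);
	if (!page)
		goto alloc;

	/* after: absence is reported as ERR_PTR(-ENOENT) */
	folio = filemap_lock_folio(mapping, offset);
	if (IS_ERR(folio))
		goto alloc;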

I attach a fix-patch that fixes this for me locally...

Cheers, Lorenzo

On Fri, Jun 13, 2025 at 08:47:43PM +0100, Matthew Wilcox (Oracle) wrote:
> Use filemap_lock_folio() instead of find_lock_page() to retrieve
> a folio from the page cache.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  mm/secretmem.c | 16 +++++++---------
>  1 file changed, 7 insertions(+), 9 deletions(-)
>
> diff --git a/mm/secretmem.c b/mm/secretmem.c
> index 589b26c2d553..dfa8d8d10bcb 100644
> --- a/mm/secretmem.c
> +++ b/mm/secretmem.c
> @@ -54,7 +54,6 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
>  	pgoff_t offset = vmf->pgoff;
>  	gfp_t gfp = vmf->gfp_mask;
>  	unsigned long addr;
> -	struct page *page;
>  	struct folio *folio;
>  	vm_fault_t ret;
>  	int err;
> @@ -65,16 +64,15 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
>  	filemap_invalidate_lock_shared(mapping);
>
>  retry:
> -	page = find_lock_page(mapping, offset);
> -	if (!page) {
> +	folio = filemap_lock_folio(mapping, offset);
> +	if (!folio) {
>  		folio = folio_alloc(gfp | __GFP_ZERO, 0);
>  		if (!folio) {
>  			ret = VM_FAULT_OOM;
>  			goto out;
>  		}
>
> -		page = &folio->page;
> -		err = set_direct_map_invalid_noflush(page);
> +		err = set_direct_map_invalid_noflush(folio_page(folio, 0));
>  		if (err) {
>  			folio_put(folio);
>  			ret = vmf_error(err);
> @@ -90,7 +88,7 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
>  			 * already happened when we marked the page invalid
>  			 * which guarantees that this call won't fail
>  			 */
> -			set_direct_map_default_noflush(page);
> +			set_direct_map_default_noflush(folio_page(folio, 0));
>  			if (err == -EEXIST)
>  				goto retry;
>
> @@ -98,11 +96,11 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
>  			goto out;
>  		}
>
> -		addr = (unsigned long)page_address(page);
> +		addr = (unsigned long)folio_address(folio);
>  		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
>  	}
>
> -	vmf->page = page;
> +	vmf->page = folio_file_page(folio, vmf->pgoff);

Small nit here: we've already stored vmf->pgoff in offset, so we could just
use offset here.

Same goes for the opening check in this function actually...
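
i.e. something like:

	vmf->page = folio_file_page(folio, offset);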

>  	ret = VM_FAULT_LOCKED;
>
>  out:
> @@ -154,7 +152,7 @@ static int secretmem_migrate_folio(struct address_space *mapping,
>
>  static void secretmem_free_folio(struct folio *folio)
>  {
> -	set_direct_map_default_noflush(&folio->page);
> +	set_direct_map_default_noflush(folio_page(folio, 0));
>  	folio_zero_segment(folio, 0, folio_size(folio));
>  }
>
> --
> 2.47.2
>
>
>

----8<----
From 850c9ecb82859fe572452cad93b917964b648141 Mon Sep 17 00:00:00 2001
From: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Date: Mon, 16 Jun 2025 14:56:27 +0100
Subject: [PATCH] fix

Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
---
 mm/secretmem.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/secretmem.c b/mm/secretmem.c
index dfa8d8d10bcb..873f23c6e92b 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -65,7 +65,7 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)

 retry:
 	folio = filemap_lock_folio(mapping, offset);
-	if (!folio) {
+	if (IS_ERR(folio)) {
 		folio = folio_alloc(gfp | __GFP_ZERO, 0);
 		if (!folio) {
 			ret = VM_FAULT_OOM;
--
2.49.0



* Re: [PATCH] secretmem: Remove uses of struct page
  2025-06-13 19:47 [PATCH] secretmem: Remove uses of struct page Matthew Wilcox (Oracle)
  2025-06-15  7:10 ` Mike Rapoport
  2025-06-16 13:56 ` Lorenzo Stoakes
@ 2025-06-16 21:19 ` David Hildenbrand
  2 siblings, 0 replies; 4+ messages in thread
From: David Hildenbrand @ 2025-06-16 21:19 UTC (permalink / raw)
  To: Matthew Wilcox (Oracle), Andrew Morton, Mike Rapoport; +Cc: linux-mm

On 13.06.25 21:47, Matthew Wilcox (Oracle) wrote:
> Use filemap_lock_folio() instead of find_lock_page() to retrieve
> a folio from the page cache.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---

[...]

>   
> -		addr = (unsigned long)page_address(page);
> +		addr = (unsigned long)folio_address(folio);
>   		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
>   	}
>   
> -	vmf->page = page;
> +	vmf->page = folio_file_page(folio, vmf->pgoff);

We only support small folios -- see the folio_alloc(gfp | __GFP_ZERO, 0) 
-- so you can just use folio here.
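
For instance, since the folio here is always order-0, something like

	vmf->page = folio_page(folio, 0);

should be enough.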

With the fixup from Lorenzo

Acked-by: David Hildenbrand <david@redhat.com>

-- 
Cheers,

David / dhildenb



