public inbox for linux-mm@kvack.org
 help / color / mirror / Atom feed
* [PATCH v2 1/2] mm/filemap: count only the faulting address as a mmap hit
       [not found] <20260427165751.746502-1-fujunjie1@qq.com>
@ 2026-04-27 16:57 ` fujunjie
  2026-04-27 18:40   ` Matthew Wilcox
  2026-04-27 16:57 ` [PATCH v2 2/2] mm/filemap: do not count FAULT_FLAG_TRIED retries as mmap hits fujunjie
  1 sibling, 1 reply; 4+ messages in thread
From: fujunjie @ 2026-04-27 16:57 UTC (permalink / raw)
  To: Matthew Wilcox, Jan Kara, Andrew Morton
  Cc: linux-fsdevel, linux-mm, linux-kernel, Roman Gushchin, Haoran Zhu

filemap_map_pages() reduces file->f_ra.mmap_miss when fault-around maps
folios that are already present in the page cache.  That hit accounting
is too generous because fault-around can install PTEs around the
faulting address even though the fault only proves that the faulting
address was accessed.

Move the mmap_miss update back into filemap_map_pages(), drop the
mmap_miss argument from the helper functions, and decrement mmap_miss
only when the helper return value shows that the faulting address was
mapped.  Keep the existing workingset-folio behavior unchanged.

Signed-off-by: fujunjie <fujunjie1@qq.com>
---
 mm/filemap.c | 55 +++++++++++++++++++++++-----------------------------
 1 file changed, 24 insertions(+), 31 deletions(-)

diff --git a/mm/filemap.c b/mm/filemap.c
index 4e636647100c1..41ffe9036c96e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3747,8 +3747,7 @@ static struct folio *next_uptodate_folio(struct xa_state *xas,
 static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 			struct folio *folio, unsigned long start,
 			unsigned long addr, unsigned int nr_pages,
-			unsigned long *rss, unsigned short *mmap_miss,
-			pgoff_t file_end)
+			unsigned long *rss, pgoff_t file_end)
 {
 	struct address_space *mapping = folio->mapping;
 	unsigned int ref_from_caller = 1;
@@ -3780,16 +3779,6 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 		if (PageHWPoison(page + count))
 			goto skip;
 
-		/*
-		 * If there are too many folios that are recently evicted
-		 * in a file, they will probably continue to be evicted.
-		 * In such situation, read-ahead is only a waste of IO.
-		 * Don't decrease mmap_miss in this scenario to make sure
-		 * we can stop read-ahead.
-		 */
-		if (!folio_test_workingset(folio))
-			(*mmap_miss)++;
-
 		/*
 		 * NOTE: If there're PTE markers, we'll leave them to be
 		 * handled in the specific fault path, and it'll prohibit the
@@ -3836,7 +3825,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 
 static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
 		struct folio *folio, unsigned long addr,
-		unsigned long *rss, unsigned short *mmap_miss)
+		unsigned long *rss)
 {
 	vm_fault_t ret = 0;
 	struct page *page = &folio->page;
@@ -3844,10 +3833,6 @@ static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
 	if (PageHWPoison(page))
 		goto out;
 
-	/* See comment of filemap_map_folio_range() */
-	if (!folio_test_workingset(folio))
-		(*mmap_miss)++;
-
 	/*
 	 * NOTE: If there're PTE markers, we'll leave them to be
 	 * handled in the specific fault path, and it'll prohibit
@@ -3882,7 +3867,6 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	vm_fault_t ret = 0;
 	unsigned long rss = 0;
 	unsigned int nr_pages = 0, folio_type;
-	unsigned short mmap_miss = 0, mmap_miss_saved;
 
 	/*
 	 * Recalculate end_pgoff based on file_end before calling
@@ -3921,6 +3905,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	folio_type = mm_counter_file(folio);
 	do {
 		unsigned long end;
+		vm_fault_t map_ret;
 
 		addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
 		vmf->pte += xas.xa_index - last_pgoff;
@@ -3928,13 +3913,27 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 		end = folio_next_index(folio) - 1;
 		nr_pages = min(end, end_pgoff) - xas.xa_index + 1;
 
-		if (!folio_test_large(folio))
-			ret |= filemap_map_order0_folio(vmf,
-					folio, addr, &rss, &mmap_miss);
-		else
-			ret |= filemap_map_folio_range(vmf, folio,
-					xas.xa_index - folio->index, addr,
-					nr_pages, &rss, &mmap_miss, file_end);
+		if (!folio_test_large(folio)) {
+			map_ret = filemap_map_order0_folio(vmf, folio, addr,
+							   &rss);
+		} else {
+			unsigned long start = xas.xa_index - folio->index;
+
+			map_ret = filemap_map_folio_range(vmf, folio, start,
+							  addr, nr_pages, &rss,
+							  file_end);
+		}
+		ret |= map_ret;
+
+		if ((map_ret & VM_FAULT_NOPAGE) &&
+		    !folio_test_workingset(folio)) {
+			unsigned short mmap_miss;
+
+			mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
+			if (mmap_miss)
+				WRITE_ONCE(file->f_ra.mmap_miss,
+					   mmap_miss - 1);
+		}
 
 		folio_unlock(folio);
 	} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
@@ -3944,12 +3943,6 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 out:
 	rcu_read_unlock();
 
-	mmap_miss_saved = READ_ONCE(file->f_ra.mmap_miss);
-	if (mmap_miss >= mmap_miss_saved)
-		WRITE_ONCE(file->f_ra.mmap_miss, 0);
-	else
-		WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss_saved - mmap_miss);
-
 	return ret;
 }
 EXPORT_SYMBOL(filemap_map_pages);
-- 
2.34.1



^ permalink raw reply related	[flat|nested] 4+ messages in thread

* [PATCH v2 2/2] mm/filemap: do not count FAULT_FLAG_TRIED retries as mmap hits
       [not found] <20260427165751.746502-1-fujunjie1@qq.com>
  2026-04-27 16:57 ` [PATCH v2 1/2] mm/filemap: count only the faulting address as a mmap hit fujunjie
@ 2026-04-27 16:57 ` fujunjie
  1 sibling, 0 replies; 4+ messages in thread
From: fujunjie @ 2026-04-27 16:57 UTC (permalink / raw)
  To: Matthew Wilcox, Jan Kara, Andrew Morton
  Cc: linux-fsdevel, linux-mm, linux-kernel, Roman Gushchin, Haoran Zhu

A fault that starts synchronous mmap readahead can return VM_FAULT_RETRY
after dropping mmap_lock.  The retry may then map the folio brought in
by that same miss.

Do not let this retry decrement mmap_miss.  The retry still maps the
folio from the page cache; it just does not count as a useful mmap
readahead hit.

Signed-off-by: fujunjie <fujunjie1@qq.com>
---
 mm/filemap.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/mm/filemap.c b/mm/filemap.c
index 41ffe9036c96e..f244886b1df3b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3926,6 +3926,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 		ret |= map_ret;
 
 		if ((map_ret & VM_FAULT_NOPAGE) &&
+		    !(vmf->flags & FAULT_FLAG_TRIED) &&
 		    !folio_test_workingset(folio)) {
 			unsigned short mmap_miss;
 
-- 
2.34.1



^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [PATCH v2 1/2] mm/filemap: count only the faulting address as a mmap hit
  2026-04-27 16:57 ` [PATCH v2 1/2] mm/filemap: count only the faulting address as a mmap hit fujunjie
@ 2026-04-27 18:40   ` Matthew Wilcox
  2026-04-28  1:12     ` Fujunjie
  0 siblings, 1 reply; 4+ messages in thread
From: Matthew Wilcox @ 2026-04-27 18:40 UTC (permalink / raw)
  To: fujunjie
  Cc: Jan Kara, Andrew Morton, linux-fsdevel, linux-mm, linux-kernel,
	Roman Gushchin, Haoran Zhu

On Mon, Apr 27, 2026 at 04:57:50PM +0000, fujunjie wrote:
> -		/*
> -		 * If there are too many folios that are recently evicted
> -		 * in a file, they will probably continue to be evicted.
> -		 * In such situation, read-ahead is only a waste of IO.
> -		 * Don't decrease mmap_miss in this scenario to make sure
> -		 * we can stop read-ahead.
> -		 */

I'm sad to lose this comment.  Why not move it to ...

> +		ret |= map_ret;

here?

> +		if ((map_ret & VM_FAULT_NOPAGE) &&
> +		    !folio_test_workingset(folio)) {
> +			unsigned short mmap_miss;
> +
> +			mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
> +			if (mmap_miss)
> +				WRITE_ONCE(file->f_ra.mmap_miss,
> +					   mmap_miss - 1);
> +		}


^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH v2 1/2] mm/filemap: count only the faulting address as a mmap hit
  2026-04-27 18:40   ` Matthew Wilcox
@ 2026-04-28  1:12     ` Fujunjie
  0 siblings, 0 replies; 4+ messages in thread
From: Fujunjie @ 2026-04-28  1:12 UTC (permalink / raw)
  To: Matthew Wilcox
  Cc: Jan Kara, Andrew Morton, linux-fsdevel, linux-mm, linux-kernel,
	Roman Gushchin, Haoran Zhu



On Tue, Apr 28, 2026 at 02:40, Matthew Wilcox wrote:
> On Mon, Apr 27, 2026 at 04:57:50PM +0000, fujunjie wrote:
>> -		/*
>> -		 * If there are too many folios that are recently evicted
>> -		 * in a file, they will probably continue to be evicted.
>> -		 * In such situation, read-ahead is only a waste of IO.
>> -		 * Don't decrease mmap_miss in this scenario to make sure
>> -		 * we can stop read-ahead.
>> -		 */
> 
> I'm sad to lose this comment.  Why not move it to ...
> 
>> +		ret |= map_ret;
> 
> here?
> 
>> +		if ((map_ret & VM_FAULT_NOPAGE) &&
>> +		    !folio_test_workingset(folio)) {
>> +			unsigned short mmap_miss;
>> +
>> +			mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
>> +			if (mmap_miss)
>> +				WRITE_ONCE(file->f_ra.mmap_miss,
>> +					   mmap_miss - 1);
>> +		}

Yes, that makes sense. I'll move the comment there in the next version.

Best regards,
fujunjie



^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2026-04-28  1:13 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
     [not found] <20260427165751.746502-1-fujunjie1@qq.com>
2026-04-27 16:57 ` [PATCH v2 1/2] mm/filemap: count only the faulting address as a mmap hit fujunjie
2026-04-27 18:40   ` Matthew Wilcox
2026-04-28  1:12     ` Fujunjie
2026-04-27 16:57 ` [PATCH v2 2/2] mm/filemap: do not count FAULT_FLAG_TRIED retries as mmap hits fujunjie

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox