From: fujunjie <fujunjie1@qq.com>
To: Matthew Wilcox <willy@infradead.org>, Jan Kara <jack@suse.cz>,
Andrew Morton <akpm@linux-foundation.org>
Cc: linux-fsdevel@vger.kernel.org, linux-mm@kvack.org,
linux-kernel@vger.kernel.org,
Roman Gushchin <roman.gushchin@linux.dev>,
Haoran Zhu <zhr1502@sjtu.edu.cn>
Subject: [PATCH v3 1/2] mm/filemap: count only the faulting address as a mmap hit
Date: Tue, 28 Apr 2026 01:59:43 +0000 [thread overview]
Message-ID: <tencent_756F151FE66F3D80479A6F982C0AB8569F09@qq.com> (raw)
In-Reply-To: <tencent_AA501E9A238337BD167E5C2ACF948A1AF308@qq.com>
filemap_map_pages() reduces file->f_ra.mmap_miss for every folio that
fault-around finds already present in the page cache. That hit
accounting is too generous: fault-around speculatively installs PTEs
for addresses surrounding the fault, while the fault itself only proves
that the faulting address was accessed.
Move the mmap_miss update back into filemap_map_pages(), drop the
mmap_miss argument from the helper functions, and decrement mmap_miss
only when the helper return value shows that the faulting address was
mapped. Keep the existing workingset-folio behavior unchanged.
Signed-off-by: fujunjie <fujunjie1@qq.com>
---
mm/filemap.c | 62 ++++++++++++++++++++++++++++++------------------------------
1 file changed, 31 insertions(+), 31 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 4e636647100c1..543e51c32397 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3747,8 +3747,7 @@ static struct folio *next_uptodate_folio(struct xa_state *xas,
static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
struct folio *folio, unsigned long start,
unsigned long addr, unsigned int nr_pages,
- unsigned long *rss, unsigned short *mmap_miss,
- pgoff_t file_end)
+ unsigned long *rss, pgoff_t file_end)
{
struct address_space *mapping = folio->mapping;
unsigned int ref_from_caller = 1;
@@ -3780,16 +3779,6 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
if (PageHWPoison(page + count))
goto skip;
- /*
- * If there are too many folios that are recently evicted
- * in a file, they will probably continue to be evicted.
- * In such situation, read-ahead is only a waste of IO.
- * Don't decrease mmap_miss in this scenario to make sure
- * we can stop read-ahead.
- */
- if (!folio_test_workingset(folio))
- (*mmap_miss)++;
-
/*
* NOTE: If there're PTE markers, we'll leave them to be
* handled in the specific fault path, and it'll prohibit the
@@ -3836,7 +3825,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
struct folio *folio, unsigned long addr,
- unsigned long *rss, unsigned short *mmap_miss)
+ unsigned long *rss)
{
vm_fault_t ret = 0;
struct page *page = &folio->page;
@@ -3844,10 +3833,6 @@ static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
if (PageHWPoison(page))
goto out;
- /* See comment of filemap_map_folio_range() */
- if (!folio_test_workingset(folio))
- (*mmap_miss)++;
-
/*
* NOTE: If there're PTE markers, we'll leave them to be
* handled in the specific fault path, and it'll prohibit
@@ -3882,7 +3867,6 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
vm_fault_t ret = 0;
unsigned long rss = 0;
unsigned int nr_pages = 0, folio_type;
- unsigned short mmap_miss = 0, mmap_miss_saved;
/*
* Recalculate end_pgoff based on file_end before calling
@@ -3921,6 +3905,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
folio_type = mm_counter_file(folio);
do {
unsigned long end;
+ vm_fault_t map_ret;
addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
vmf->pte += xas.xa_index - last_pgoff;
@@ -3928,13 +3913,34 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
end = folio_next_index(folio) - 1;
nr_pages = min(end, end_pgoff) - xas.xa_index + 1;
- if (!folio_test_large(folio))
- ret |= filemap_map_order0_folio(vmf,
- folio, addr, &rss, &mmap_miss);
- else
- ret |= filemap_map_folio_range(vmf, folio,
- xas.xa_index - folio->index, addr,
- nr_pages, &rss, &mmap_miss, file_end);
+ if (!folio_test_large(folio)) {
+ map_ret = filemap_map_order0_folio(vmf, folio, addr,
+ &rss);
+ } else {
+ unsigned long start = xas.xa_index - folio->index;
+
+ map_ret = filemap_map_folio_range(vmf, folio, start,
+ addr, nr_pages, &rss,
+ file_end);
+ }
+ ret |= map_ret;
+
+ /*
+ * If there are too many folios that are recently evicted
+ * in a file, they will probably continue to be evicted.
+ * In such situation, read-ahead is only a waste of IO.
+ * Don't decrease mmap_miss in this scenario to make sure
+ * we can stop read-ahead.
+ */
+ if ((map_ret & VM_FAULT_NOPAGE) &&
+ !folio_test_workingset(folio)) {
+ unsigned short mmap_miss;
+
+ mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
+ if (mmap_miss)
+ WRITE_ONCE(file->f_ra.mmap_miss,
+ mmap_miss - 1);
+ }
folio_unlock(folio);
} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
@@ -3944,12 +3943,6 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
out:
rcu_read_unlock();
- mmap_miss_saved = READ_ONCE(file->f_ra.mmap_miss);
- if (mmap_miss >= mmap_miss_saved)
- WRITE_ONCE(file->f_ra.mmap_miss, 0);
- else
- WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss_saved - mmap_miss);
-
return ret;
}
EXPORT_SYMBOL(filemap_map_pages);
--
2.34.1
next prev parent reply other threads:[~2026-04-28 2:00 UTC|newest]
Thread overview: 7+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-28 1:57 [PATCH v3 0/2] mm/filemap: tighten mmap_miss hit accounting fujunjie
2026-04-28 1:59 ` fujunjie [this message]
2026-04-28 9:47 ` [PATCH v3 1/2] mm/filemap: count only the faulting address as a mmap hit Jan Kara
2026-05-10 12:45 ` Vishal Moola
2026-04-28 1:59 ` [PATCH v3 2/2] mm/filemap: do not count FAULT_FLAG_TRIED retries as mmap hits fujunjie
2026-04-28 9:48 ` Jan Kara
2026-05-10 12:46 ` Vishal Moola
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=tencent_756F151FE66F3D80479A6F982C0AB8569F09@qq.com \
--to=fujunjie1@qq.com \
--cc=akpm@linux-foundation.org \
--cc=jack@suse.cz \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=roman.gushchin@linux.dev \
--cc=willy@infradead.org \
--cc=zhr1502@sjtu.edu.cn \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox