From: Zi Yan <ziy@nvidia.com>
To: Andrew Morton <akpm@linux-foundation.org>,
David Hildenbrand <david@kernel.org>,
"Matthew Wilcox (Oracle)" <willy@infradead.org>,
Song Liu <songliubraving@fb.com>
Cc: Chris Mason <clm@fb.com>, David Sterba <dsterba@suse.com>,
Alexander Viro <viro@zeniv.linux.org.uk>,
Christian Brauner <brauner@kernel.org>, Jan Kara <jack@suse.cz>,
Lorenzo Stoakes <ljs@kernel.org>, Zi Yan <ziy@nvidia.com>,
Baolin Wang <baolin.wang@linux.alibaba.com>,
"Liam R. Howlett" <Liam.Howlett@oracle.com>,
Nico Pache <npache@redhat.com>,
Ryan Roberts <ryan.roberts@arm.com>, Dev Jain <dev.jain@arm.com>,
Barry Song <baohua@kernel.org>, Lance Yang <lance.yang@linux.dev>,
Vlastimil Babka <vbabka@kernel.org>,
Mike Rapoport <rppt@kernel.org>,
Suren Baghdasaryan <surenb@google.com>,
Michal Hocko <mhocko@suse.com>, Shuah Khan <shuah@kernel.org>,
linux-btrfs@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-fsdevel@vger.kernel.org, linux-mm@kvack.org,
linux-kselftest@vger.kernel.org
Subject: [PATCH v5 14/14] selftests/mm: add writable-file collapse tests for khugepaged
Date: Wed, 29 Apr 2026 11:35:37 -0400 [thread overview]
Message-ID: <20260429153538.727855-10-ziy@nvidia.com> (raw)
In-Reply-To: <20260429152924.727124-1-ziy@nvidia.com>
collapse_file() now supports collapsing clean pagecache folios from
writable files, so add corresponding tests.
Note that madvise(MADV_COLLAPSE) works for dirty pagecache folios from
writable files, because collapse_single_pmd() triggers a synchronous
writeback when the first attempt of collapse_file() fails. That writeback
makes the dirty folios clean, so the retry of collapse_file() succeeds.
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
tools/testing/selftests/mm/khugepaged.c | 113 ++++++++++++++++++------
1 file changed, 86 insertions(+), 27 deletions(-)
diff --git a/tools/testing/selftests/mm/khugepaged.c b/tools/testing/selftests/mm/khugepaged.c
index 80b913185c643..e73aab5149bdf 100644
--- a/tools/testing/selftests/mm/khugepaged.c
+++ b/tools/testing/selftests/mm/khugepaged.c
@@ -41,6 +41,12 @@ enum vma_type {
VMA_SHMEM,
};
+enum file_setup_ops {
+ FILE_SETUP_READ_ONLY_FS,
+ FILE_SETUP_READ_WRITE_FS_READ_DATA,
+ FILE_SETUP_READ_WRITE_FS_WRITE_DATA,
+};
+
struct mem_ops {
void *(*setup_area)(int nr_hpages);
void (*cleanup_area)(void *p, unsigned long size);
@@ -50,7 +56,8 @@ struct mem_ops {
};
static struct mem_ops *read_only_file_ops;
-static struct mem_ops *read_write_file_ops;
+static struct mem_ops *read_write_file_read_ops;
+static struct mem_ops *read_write_file_write_ops;
static struct mem_ops *anon_ops;
static struct mem_ops *shmem_ops;
@@ -113,7 +120,8 @@ static void restore_settings(int sig)
static void save_settings(void)
{
printf("Save THP and khugepaged settings...");
- if ((read_only_file_ops || read_write_file_ops) &&
+ if ((read_only_file_ops || read_write_file_read_ops ||
+ read_write_file_write_ops) &&
finfo.type == VMA_FILE)
thp_set_read_ahead_path(finfo.dev_queue_read_ahead_path);
thp_save_settings();
@@ -366,14 +374,14 @@ static bool anon_check_huge(void *addr, int nr_hpages)
return check_huge_anon(addr, nr_hpages, hpage_pmd_size);
}
-static void *file_setup_area_common(int nr_hpages, bool read_only)
+static void *file_setup_area_common(int nr_hpages, enum file_setup_ops setup)
{
int fd;
void *p;
unsigned long size;
- int open_opt = read_only ? O_RDONLY : O_RDWR;
- int mmap_prot = read_only ? PROT_READ : (PROT_READ | PROT_WRITE);
- int mmap_opt = read_only ? MAP_PRIVATE : MAP_SHARED;
+ int open_opt = setup == FILE_SETUP_READ_ONLY_FS ? O_RDONLY : O_RDWR;
+ int mmap_prot = setup == FILE_SETUP_READ_ONLY_FS ? PROT_READ : (PROT_READ | PROT_WRITE);
+ int mmap_opt = setup == FILE_SETUP_READ_ONLY_FS ? MAP_PRIVATE : MAP_SHARED;
unlink(finfo.path); /* Cleanup from previous failed tests */
printf("Creating %s for collapse%s...", finfo.path,
@@ -405,7 +413,10 @@ static void *file_setup_area_common(int nr_hpages, bool read_only)
success("OK");
printf("Opening %s %s for collapse...", finfo.path,
- read_only ? "read only" : "read-write");
+ setup == FILE_SETUP_READ_ONLY_FS ? "read only" :
+ setup == FILE_SETUP_READ_WRITE_FS_READ_DATA ?
+ "read-write (read)" :
+ "read-write (write)");
finfo.fd = open(finfo.path, open_opt, 777);
if (finfo.fd < 0) {
perror("open()");
@@ -426,12 +437,17 @@ static void *file_setup_area_common(int nr_hpages, bool read_only)
static void *file_setup_read_only_area(int nr_hpages)
{
- return file_setup_area_common(nr_hpages, /* read_only= */ true);
+ return file_setup_area_common(nr_hpages, FILE_SETUP_READ_ONLY_FS);
+}
+
+static void *file_setup_read_write_fs_read_area(int nr_hpages)
+{
+ return file_setup_area_common(nr_hpages, FILE_SETUP_READ_WRITE_FS_READ_DATA);
}
-static void *file_setup_read_write_area(int nr_hpages)
+static void *file_setup_read_write_fs_write_area(int nr_hpages)
{
- return file_setup_area_common(nr_hpages, /* read_only= */ false);
+ return file_setup_area_common(nr_hpages, FILE_SETUP_READ_WRITE_FS_WRITE_DATA);
}
static void file_cleanup_area(void *p, unsigned long size)
@@ -455,6 +471,17 @@ static void file_fault_read(void *p, unsigned long start, unsigned long end)
file_fault_common(p, start, end, MADV_POPULATE_READ);
}
+static void file_fault_read_and_flush(void *p, unsigned long start, unsigned long end)
+{
+ file_fault_common(p, start, end, MADV_POPULATE_READ);
+
+ /*
+ * make folio clean, since dirty folios from read&write file are
+ * rejected and not flushed
+ */
+ msync((char *)p + start, end - start, MS_SYNC);
+}
+
static void file_fault_write(void *p, unsigned long start, unsigned long end)
{
file_fault_common(p, start, end, MADV_POPULATE_WRITE);
@@ -523,8 +550,16 @@ static struct mem_ops __read_only_file_ops = {
.name = "file",
};
-static struct mem_ops __read_write_file_ops = {
- .setup_area = &file_setup_read_write_area,
+static struct mem_ops __read_write_file_read_ops = {
+ .setup_area = &file_setup_read_write_fs_read_area,
+ .cleanup_area = &file_cleanup_area,
+ .fault = &file_fault_read_and_flush,
+ .check_huge = &file_check_huge,
+ .name = "file",
+};
+
+static struct mem_ops __read_write_file_write_ops = {
+ .setup_area = &file_setup_read_write_fs_write_area,
.cleanup_area = &file_cleanup_area,
.fault = &file_fault_write,
.check_huge = &file_check_huge,
@@ -542,7 +577,8 @@ static struct mem_ops __shmem_ops = {
static bool is_tmpfs(struct mem_ops *ops)
{
return (ops == &__read_only_file_ops ||
- ops == &__read_write_file_ops) &&
+ ops == &__read_write_file_read_ops ||
+ ops == &__read_write_file_write_ops) &&
finfo.type == VMA_SHMEM;
}
@@ -559,9 +595,11 @@ static void __madvise_collapse(const char *msg, char *p, int nr_hpages,
printf("%s...", msg);
- /* read&write file collapse always fail */
- if (!is_tmpfs(ops) && ops == &__read_write_file_ops)
- expect = false;
+ /*
+ * read&write file collapse succeeds for MADV_COLLAPSE because dirty
+ * folios are written back after collapse fails for dirty folios and
+ * another collapse is attempted.
+ */
/*
* Prevent khugepaged interference and tests that MADV_COLLAPSE
@@ -629,8 +667,11 @@ static bool wait_for_scan(const char *msg, char *p, int nr_hpages,
static void khugepaged_collapse(const char *msg, char *p, int nr_hpages,
struct mem_ops *ops, bool expect)
{
- /* read&write file collapse always fail */
- if (!is_tmpfs(ops) && ops == &__read_write_file_ops)
+ /*
+ * read&write file collapse fails since khugepaged does not flush
+ * the target dirty folios
+ */
+ if (!is_tmpfs(ops) && ops == &__read_write_file_write_ops)
expect = false;
if (wait_for_scan(msg, p, nr_hpages, ops)) {
@@ -753,6 +794,9 @@ static void collapse_max_ptes_none(struct collapse_context *c, struct mem_ops *o
validate_memory(p, 0, (hpage_pmd_nr - max_ptes_none - fault_nr_pages) * page_size);
if (c->enforce_pte_scan_limits) {
+ ops->cleanup_area(p, hpage_pmd_size);
+ p = ops->setup_area(1);
+
ops->fault(p, 0, (hpage_pmd_nr - max_ptes_none) * page_size);
c->collapse("Collapse with max_ptes_none PTEs empty", p, 1, ops,
true);
@@ -1200,21 +1244,24 @@ static void parse_test_type(int argc, char **argv)
if (!strcmp(buf, "all")) {
read_only_file_ops = &__read_only_file_ops;
- read_write_file_ops = &__read_write_file_ops;
+ read_write_file_read_ops = &__read_write_file_read_ops;
+ read_write_file_write_ops = &__read_write_file_write_ops;
anon_ops = &__anon_ops;
shmem_ops = &__shmem_ops;
} else if (!strcmp(buf, "anon")) {
anon_ops = &__anon_ops;
} else if (!strcmp(buf, "file")) {
read_only_file_ops = &__read_only_file_ops;
- read_write_file_ops = &__read_write_file_ops;
+ read_write_file_read_ops = &__read_write_file_read_ops;
+ read_write_file_write_ops = &__read_write_file_write_ops;
} else if (!strcmp(buf, "shmem")) {
shmem_ops = &__shmem_ops;
} else {
usage();
}
- if (!read_only_file_ops && !read_write_file_ops)
+ if (!read_only_file_ops && !read_write_file_read_ops &&
+ !read_write_file_write_ops)
return;
if (argc != 2)
@@ -1287,11 +1334,13 @@ int main(int argc, char **argv)
TEST(collapse_full, khugepaged_context, anon_ops);
TEST(collapse_full, khugepaged_context, read_only_file_ops);
- TEST(collapse_full, khugepaged_context, read_write_file_ops);
+ TEST(collapse_full, khugepaged_context, read_write_file_read_ops);
+ TEST(collapse_full, khugepaged_context, read_write_file_write_ops);
TEST(collapse_full, khugepaged_context, shmem_ops);
TEST(collapse_full, madvise_context, anon_ops);
TEST(collapse_full, madvise_context, read_only_file_ops);
- TEST(collapse_full, madvise_context, read_write_file_ops);
+ TEST(collapse_full, madvise_context, read_write_file_read_ops);
+ TEST(collapse_full, madvise_context, read_write_file_write_ops);
TEST(collapse_full, madvise_context, shmem_ops);
TEST(collapse_empty, khugepaged_context, anon_ops);
@@ -1299,30 +1348,38 @@ int main(int argc, char **argv)
TEST(collapse_single_pte_entry, khugepaged_context, anon_ops);
TEST(collapse_single_pte_entry, khugepaged_context, read_only_file_ops);
- TEST(collapse_single_pte_entry, khugepaged_context, read_write_file_ops);
+ TEST(collapse_single_pte_entry, khugepaged_context, read_write_file_read_ops);
+ TEST(collapse_single_pte_entry, khugepaged_context, read_write_file_write_ops);
TEST(collapse_single_pte_entry, khugepaged_context, shmem_ops);
TEST(collapse_single_pte_entry, madvise_context, anon_ops);
TEST(collapse_single_pte_entry, madvise_context, read_only_file_ops);
- TEST(collapse_single_pte_entry, madvise_context, read_write_file_ops);
+ TEST(collapse_single_pte_entry, madvise_context, read_write_file_read_ops);
+ TEST(collapse_single_pte_entry, madvise_context, read_write_file_write_ops);
TEST(collapse_single_pte_entry, madvise_context, shmem_ops);
TEST(collapse_max_ptes_none, khugepaged_context, anon_ops);
TEST(collapse_max_ptes_none, khugepaged_context, read_only_file_ops);
- TEST(collapse_max_ptes_none, khugepaged_context, read_write_file_ops);
+ TEST(collapse_max_ptes_none, khugepaged_context, read_write_file_read_ops);
+ TEST(collapse_max_ptes_none, khugepaged_context, read_write_file_write_ops);
TEST(collapse_max_ptes_none, madvise_context, anon_ops);
TEST(collapse_max_ptes_none, madvise_context, read_only_file_ops);
- TEST(collapse_max_ptes_none, madvise_context, read_write_file_ops);
+ TEST(collapse_max_ptes_none, madvise_context, read_write_file_read_ops);
+ TEST(collapse_max_ptes_none, madvise_context, read_write_file_write_ops);
TEST(collapse_single_pte_entry_compound, khugepaged_context, anon_ops);
TEST(collapse_single_pte_entry_compound, khugepaged_context, read_only_file_ops);
+ TEST(collapse_single_pte_entry_compound, khugepaged_context, read_write_file_read_ops);
TEST(collapse_single_pte_entry_compound, madvise_context, anon_ops);
TEST(collapse_single_pte_entry_compound, madvise_context, read_only_file_ops);
+ TEST(collapse_single_pte_entry_compound, madvise_context, read_write_file_read_ops);
TEST(collapse_full_of_compound, khugepaged_context, anon_ops);
TEST(collapse_full_of_compound, khugepaged_context, read_only_file_ops);
+ TEST(collapse_full_of_compound, khugepaged_context, read_write_file_read_ops);
TEST(collapse_full_of_compound, khugepaged_context, shmem_ops);
TEST(collapse_full_of_compound, madvise_context, anon_ops);
TEST(collapse_full_of_compound, madvise_context, read_only_file_ops);
+ TEST(collapse_full_of_compound, madvise_context, read_write_file_read_ops);
TEST(collapse_full_of_compound, madvise_context, shmem_ops);
TEST(collapse_compound_extreme, khugepaged_context, anon_ops);
@@ -1345,9 +1402,11 @@ int main(int argc, char **argv)
TEST(madvise_collapse_existing_thps, madvise_context, anon_ops);
TEST(madvise_collapse_existing_thps, madvise_context, read_only_file_ops);
+ TEST(madvise_collapse_existing_thps, madvise_context, read_write_file_read_ops);
TEST(madvise_collapse_existing_thps, madvise_context, shmem_ops);
TEST(madvise_retracted_page_tables, madvise_context, read_only_file_ops);
+ TEST(madvise_retracted_page_tables, madvise_context, read_write_file_read_ops);
TEST(madvise_retracted_page_tables, madvise_context, shmem_ops);
restore_settings(0);
--
2.53.0
next prev parent reply other threads:[~2026-04-29 15:37 UTC|newest]
Thread overview: 34+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-29 15:29 [PATCH v5 00/14] Remove CONFIG_READ_ONLY_THP_FOR_FS and enable file THP for writable files Zi Yan
2026-04-29 15:29 ` [PATCH v5 01/14] mm/khugepaged: remove READ_ONLY_THP_FOR_FS check Zi Yan
2026-04-30 14:37 ` Zi Yan
2026-04-30 15:04 ` Andrew Morton
2026-05-04 3:48 ` Nico Pache
2026-04-29 15:29 ` [PATCH v5 02/14] mm/khugepaged: add folio dirty check after try_to_unmap() Zi Yan
2026-04-30 15:11 ` Zi Yan
2026-05-04 3:53 ` Nico Pache
2026-05-06 5:23 ` Lance Yang
2026-04-29 15:29 ` [PATCH v5 03/14] mm/huge_memory: remove READ_ONLY_THP_FOR_FS from file_thp_enabled() Zi Yan
2026-05-04 3:57 ` Nico Pache
2026-04-29 15:29 ` [PATCH v5 04/14] mm/khugepaged: remove READ_ONLY_THP_FOR_FS check in hugepage_enabled() Zi Yan
2026-05-04 4:00 ` Nico Pache
2026-04-29 15:35 ` [PATCH v5 05/14] mm: remove READ_ONLY_THP_FOR_FS Kconfig option Zi Yan
2026-05-04 4:02 ` Nico Pache
2026-04-29 15:35 ` [PATCH v5 06/14] mm: fs: remove filemap_nr_thps*() functions and their users Zi Yan
2026-04-29 15:35 ` [PATCH v5 07/14] fs: remove nr_thps from struct address_space Zi Yan
2026-05-04 4:11 ` Nico Pache
2026-04-29 15:35 ` [PATCH v5 08/14] mm/huge_memory: remove folio split check for READ_ONLY_THP_FOR_FS Zi Yan
2026-04-29 15:35 ` [PATCH v5 09/14] mm/truncate: use folio_split() in truncate_inode_partial_folio() Zi Yan
2026-04-30 15:12 ` Zi Yan
2026-04-29 15:35 ` [PATCH v5 10/14] fs/btrfs: remove a comment referring to READ_ONLY_THP_FOR_FS Zi Yan
2026-04-29 15:35 ` [PATCH v5 11/14] selftests/mm: remove READ_ONLY_THP_FOR_FS in khugepaged Zi Yan
2026-04-30 15:16 ` Zi Yan
2026-04-30 15:27 ` Zi Yan
2026-05-04 4:23 ` Nico Pache
2026-05-06 13:11 ` Zi Yan
2026-05-04 10:11 ` Nico Pache
2026-05-06 13:15 ` Zi Yan
2026-04-29 15:35 ` [PATCH v5 12/14] selftests/mm: remove READ_ONLY_THP_FOR_FS code from guard-regions Zi Yan
2026-04-29 15:35 ` [PATCH v5 13/14] mm/khugepaged: enable clean pagecache folio collapse for writable files Zi Yan
2026-04-30 15:18 ` Zi Yan
2026-04-29 15:35 ` Zi Yan [this message]
2026-04-29 16:13 ` [PATCH v5 00/14] Remove CONFIG_READ_ONLY_THP_FOR_FS and enable file THP " Andrew Morton
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260429153538.727855-10-ziy@nvidia.com \
--to=ziy@nvidia.com \
--cc=Liam.Howlett@oracle.com \
--cc=akpm@linux-foundation.org \
--cc=baohua@kernel.org \
--cc=baolin.wang@linux.alibaba.com \
--cc=brauner@kernel.org \
--cc=clm@fb.com \
--cc=david@kernel.org \
--cc=dev.jain@arm.com \
--cc=dsterba@suse.com \
--cc=jack@suse.cz \
--cc=lance.yang@linux.dev \
--cc=linux-btrfs@vger.kernel.org \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-kselftest@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=ljs@kernel.org \
--cc=mhocko@suse.com \
--cc=npache@redhat.com \
--cc=rppt@kernel.org \
--cc=ryan.roberts@arm.com \
--cc=shuah@kernel.org \
--cc=songliubraving@fb.com \
--cc=surenb@google.com \
--cc=vbabka@kernel.org \
--cc=viro@zeniv.linux.org.uk \
--cc=willy@infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox