* [PATCH v2] selftests/mm: pagemap_scan ioctl: add PFN ZERO test cases
From: Muhammad Usama Anjum @ 2025-07-02 14:20 UTC
To: Andrew Morton, Shuah Khan
Cc: Muhammad Usama Anjum, kernel, David Hildenbrand, linux-mm,
linux-kselftest, linux-kernel
Add test cases to verify the correctness of the PFN ZERO flag of the
pagemap_scan ioctl. Test with memory backed by normal pages and with
memory backed by huge pages.
Cc: David Hildenbrand <david@redhat.com>
Signed-off-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
---
The bug has been fixed [1].
[1] https://lore.kernel.org/all/20250617143532.2375383-1-david@redhat.com
Changes since v1:
- Skip if madvise() fails
- Skip test if use_zero_page isn't set to 1
- Keep on using memalign()+free() to allocate huge pages
---
tools/testing/selftests/mm/pagemap_ioctl.c | 86 +++++++++++++++++++++-
1 file changed, 85 insertions(+), 1 deletion(-)
diff --git a/tools/testing/selftests/mm/pagemap_ioctl.c b/tools/testing/selftests/mm/pagemap_ioctl.c
index 57b4bba2b45f3..976ab357f4651 100644
--- a/tools/testing/selftests/mm/pagemap_ioctl.c
+++ b/tools/testing/selftests/mm/pagemap_ioctl.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+
#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
@@ -1480,6 +1481,86 @@ static void transact_test(int page_size)
extra_thread_faults);
}
+bool is_use_zero_page_set(void)
+{
+ ssize_t bytes_read;
+ char buffer[2] = {0};
+ int fd;
+
+ fd = open("/sys/kernel/mm/transparent_hugepage/use_zero_page", O_RDONLY);
+ if (fd < 0)
+ return 0;
+
+ bytes_read = read(fd, buffer, sizeof(buffer) - 1);
+ if (bytes_read <= 0) {
+ close(fd);
+ return 0;
+ }
+
+ close(fd);
+ if (atoi(buffer) != 1)
+ return 0;
+
+ return 1;
+}
+
+void zeropfn_tests(void)
+{
+ unsigned long long mem_size;
+ struct page_region vec;
+ int i, ret;
+ char *mem;
+
+ /* Test with normal memory */
+ mem_size = 10 * page_size;
+ mem = mmap(NULL, mem_size, PROT_READ, MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (mem == MAP_FAILED)
+ ksft_exit_fail_msg("error nomem\n");
+
+ /* Touch each page to ensure it's mapped */
+ for (i = 0; i < mem_size; i += page_size)
+ (void)((volatile char *)mem)[i];
+
+ ret = pagemap_ioctl(mem, mem_size, &vec, 1, 0,
+ (mem_size / page_size), PAGE_IS_PFNZERO, 0, 0, PAGE_IS_PFNZERO);
+ if (ret < 0)
+ ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
+
+ ksft_test_result(ret == 1 && LEN(vec) == (mem_size / page_size),
+ "%s all pages must have PFNZERO set\n", __func__);
+
+ munmap(mem, mem_size);
+
+ /* Test with huge pages if use_zero_page is set to 1 */
+ if (!is_use_zero_page_set()) {
+ ksft_test_result_skip("%s use_zero_page not supported or not set to 1\n", __func__);
+ return;
+ }
+
+ mem_size = 10 * hpage_size;
+ mem = memalign(hpage_size, mem_size);
+ if (!mem)
+ ksft_exit_fail_msg("error nomem\n");
+
+ ret = madvise(mem, mem_size, MADV_HUGEPAGE);
+ if (!ret) {
+ for (i = 0; i < mem_size; i += hpage_size)
+ (void)((volatile char *)mem)[i];
+
+ ret = pagemap_ioctl(mem, mem_size, &vec, 1, 0,
+ (mem_size / page_size), PAGE_IS_PFNZERO, 0, 0, PAGE_IS_PFNZERO);
+ if (ret < 0)
+ ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
+
+ ksft_test_result(ret == 1 && LEN(vec) == (mem_size / page_size),
+ "%s all huge pages must have PFNZERO set\n", __func__);
+
+ free(mem);
+ } else {
+ ksft_test_result_skip("%s huge page not supported\n", __func__);
+ }
+}
+
int main(int __attribute__((unused)) argc, char *argv[])
{
int shmid, buf_size, fd, i, ret;
@@ -1494,7 +1575,7 @@ int main(int __attribute__((unused)) argc, char *argv[])
if (init_uffd())
ksft_exit_pass();
- ksft_set_plan(115);
+ ksft_set_plan(117);
page_size = getpagesize();
hpage_size = read_pmd_pagesize();
@@ -1669,6 +1750,9 @@ int main(int __attribute__((unused)) argc, char *argv[])
/* 16. Userfaultfd tests */
userfaultfd_tests();
+ /* 17. ZEROPFN tests */
+ zeropfn_tests();
+
close(pagemap_fd);
ksft_exit_pass();
}
--
2.43.0
* Re: [PATCH v2] selftests/mm: pagemap_scan ioctl: add PFN ZERO test cases
From: David Hildenbrand @ 2025-07-04 12:26 UTC
To: Muhammad Usama Anjum, Andrew Morton, Shuah Khan
Cc: kernel, linux-mm, linux-kselftest, linux-kernel
On 02.07.25 16:20, Muhammad Usama Anjum wrote:
> Add test cases to verify the correctness of the PFN ZERO flag of the
> pagemap_scan ioctl. Test with memory backed by normal pages and with
> memory backed by huge pages.
>
> Cc: David Hildenbrand <david@redhat.com>
> Signed-off-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
> ---
> The bug has been fixed [1].
>
> [1] https://lore.kernel.org/all/20250617143532.2375383-1-david@redhat.com
> Changes since v1:
> - Skip if madvise() fails
> - Skip test if use_zero_page isn't set to 1
> - Keep on using memalign()+free() to allocate huge pages
> ---
> tools/testing/selftests/mm/pagemap_ioctl.c | 86 +++++++++++++++++++++-
> 1 file changed, 85 insertions(+), 1 deletion(-)
>
> diff --git a/tools/testing/selftests/mm/pagemap_ioctl.c b/tools/testing/selftests/mm/pagemap_ioctl.c
> index 57b4bba2b45f3..976ab357f4651 100644
> --- a/tools/testing/selftests/mm/pagemap_ioctl.c
> +++ b/tools/testing/selftests/mm/pagemap_ioctl.c
> @@ -1,4 +1,5 @@
> // SPDX-License-Identifier: GPL-2.0
> +
> #define _GNU_SOURCE
> #include <stdio.h>
> #include <fcntl.h>
> @@ -1480,6 +1481,86 @@ static void transact_test(int page_size)
> extra_thread_faults);
> }
>
> +bool is_use_zero_page_set(void)
> +{
> + ssize_t bytes_read;
> + char buffer[2] = {0};
> + int fd;
> +
> + fd = open("/sys/kernel/mm/transparent_hugepage/use_zero_page", O_RDONLY);
> + if (fd < 0)
> + return 0;
> +
> + bytes_read = read(fd, buffer, sizeof(buffer) - 1);
> + if (bytes_read <= 0) {
> + close(fd);
> + return 0;
> + }
> +
> + close(fd);
> + if (atoi(buffer) != 1)
> + return 0;
> +
> + return 1;
> +}
You should probably factor out detect_huge_zeropage() from cow.c into
vm_utils.c, and let it return the result.
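For illustration, a minimal sketch of what such a shared helper could look
like once factored out (the name, the bool return value and the buffer size
here are assumptions following the suggestion above, not the in-tree cow.c
code):

#include <fcntl.h>
#include <stdbool.h>
#include <stdlib.h>
#include <unistd.h>

/*
 * Hypothetical shared helper: report whether read faults in THP-eligible
 * anonymous areas are backed by the huge zero page (use_zero_page == 1).
 */
bool detect_huge_zeropage(void)
{
	const char *path = "/sys/kernel/mm/transparent_hugepage/use_zero_page";
	char buf[16] = {0};
	ssize_t len;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return false;	/* no THP support or knob not present */

	len = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	if (len <= 0)
		return false;

	/* The file holds "0" or "1"; the huge zero page is used when it is 1. */
	return strtoul(buf, NULL, 10) == 1;
}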
> +
> +void zeropfn_tests(void)
> +{
> + unsigned long long mem_size;
> + struct page_region vec;
> + int i, ret;
> + char *mem;
> +
> + /* Test with normal memory */
> + mem_size = 10 * page_size;
> + mem = mmap(NULL, mem_size, PROT_READ, MAP_PRIVATE | MAP_ANON, -1, 0);
> + if (mem == MAP_FAILED)
> + ksft_exit_fail_msg("error nomem\n");
> +
> + /* Touch each page to ensure it's mapped */
> + for (i = 0; i < mem_size; i += page_size)
> + (void)((volatile char *)mem)[i];
> +
> + ret = pagemap_ioctl(mem, mem_size, &vec, 1, 0,
> + (mem_size / page_size), PAGE_IS_PFNZERO, 0, 0, PAGE_IS_PFNZERO);
> + if (ret < 0)
> + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
> +
> + ksft_test_result(ret == 1 && LEN(vec) == (mem_size / page_size),
> + "%s all pages must have PFNZERO set\n", __func__);
> +
> + munmap(mem, mem_size);
> +
> + /* Test with huge pages if use_zero_page is set to 1 */
> + if (!is_use_zero_page_set()) {
> + ksft_test_result_skip("%s use_zero_page not supported or not set to 1\n", __func__);
> + return;
> + }
> +
> + mem_size = 10 * hpage_size;
> + mem = memalign(hpage_size, mem_size);
> + if (!mem)
> + ksft_exit_fail_msg("error nomem\n");
Didn't we discuss using mmap() instead?
See run_with_huge_zeropage() in cow.c for how to do the alignment
yourself easily.
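Not the final patch, just a rough sketch of that mmap()-based approach as it
might slot into zeropfn_tests(); it reuses mem, mem_size, hpage_size and the
ksft helpers already in pagemap_ioctl.c, assumes hpage_size is a power of
two, and would need <stdint.h> for uintptr_t:

	/*
	 * Over-allocate with mmap() and align the start up to a huge page
	 * boundary by hand, instead of memalign()+free().
	 */
	size_t map_size = mem_size + hpage_size;
	char *map;

	map = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANON, -1, 0);
	if (map == MAP_FAILED)
		ksft_exit_fail_msg("error nomem\n");

	/* Round the start up to the next hpage_size boundary. */
	mem = (char *)(((uintptr_t)map + hpage_size - 1) &
		       ~((uintptr_t)hpage_size - 1));

	if (!madvise(mem, mem_size, MADV_HUGEPAGE)) {
		/* ... touch the pages and run the PAGE_IS_PFNZERO check ... */
	} else {
		ksft_test_result_skip("%s huge page not supported\n", __func__);
	}

	munmap(map, map_size);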
--
Cheers,
David / dhildenb