From: Ackerley Tng <ackerleytng@google.com>
To: kvm@vger.kernel.org, linux-mm@kvack.org,
linux-kernel@vger.kernel.org, x86@kernel.org,
linux-fsdevel@vger.kernel.org
Cc: ackerleytng@google.com, aik@amd.com, ajones@ventanamicro.com,
akpm@linux-foundation.org, amoorthy@google.com,
anthony.yznaga@oracle.com, anup@brainfault.org,
aou@eecs.berkeley.edu, bfoster@redhat.com,
binbin.wu@linux.intel.com, brauner@kernel.org,
catalin.marinas@arm.com, chao.p.peng@intel.com,
chenhuacai@kernel.org, dave.hansen@intel.com, david@redhat.com,
dmatlack@google.com, dwmw@amazon.co.uk, erdemaktas@google.com,
fan.du@intel.com, fvdl@google.com, graf@amazon.com,
haibo1.xu@intel.com, hch@infradead.org, hughd@google.com,
ira.weiny@intel.com, isaku.yamahata@intel.com, jack@suse.cz,
james.morse@arm.com, jarkko@kernel.org, jgg@ziepe.ca,
jgowans@amazon.com, jhubbard@nvidia.com, jroedel@suse.de,
jthoughton@google.com, jun.miao@intel.com, kai.huang@intel.com,
keirf@google.com, kent.overstreet@linux.dev,
kirill.shutemov@intel.com, liam.merwick@oracle.com,
maciej.wieczor-retman@intel.com, mail@maciej.szmigiero.name,
maz@kernel.org, mic@digikod.net, michael.roth@amd.com,
mpe@ellerman.id.au, muchun.song@linux.dev, nikunj@amd.com,
nsaenz@amazon.es, oliver.upton@linux.dev, palmer@dabbelt.com,
pankaj.gupta@amd.com, paul.walmsley@sifive.com,
pbonzini@redhat.com, pdurrant@amazon.co.uk, peterx@redhat.com,
pgonda@google.com, pvorel@suse.cz, qperret@google.com,
quic_cvanscha@quicinc.com, quic_eberman@quicinc.com,
quic_mnalajal@quicinc.com, quic_pderrin@quicinc.com,
quic_pheragu@quicinc.com, quic_svaddagi@quicinc.com,
quic_tsoni@quicinc.com, richard.weiyang@gmail.com,
rick.p.edgecombe@intel.com, rientjes@google.com,
roypat@amazon.co.uk, rppt@kernel.org, seanjc@google.com,
shuah@kernel.org, steven.price@arm.com,
steven.sistare@oracle.com, suzuki.poulose@arm.com,
tabba@google.com, thomas.lendacky@amd.com,
usama.arif@bytedance.com, vannapurve@google.com, vbabka@suse.cz,
viro@zeniv.linux.org.uk, vkuznets@redhat.com,
wei.w.wang@intel.com, will@kernel.org, willy@infradead.org,
xiaoyao.li@intel.com, yan.y.zhao@intel.com, yilun.xu@intel.com,
yuzenghui@huawei.com, zhiquan1.li@intel.com
Subject: [RFC PATCH v2 43/51] KVM: selftests: Update conversion flows test for HugeTLB
Date: Wed, 14 May 2025 16:42:22 -0700 [thread overview]
Message-ID: <e232d7e84ebb1bffa323ec554474d0cf759e8bae.1747264138.git.ackerleytng@google.com> (raw)
In-Reply-To: <cover.1747264138.git.ackerleytng@google.com>
This patch updates the conversion flows test to use
GUEST_MEMFD_FLAG_HUGETLB and exercises it with 2MB and 1GB page sizes.
Change-Id: If5d93cb776d6bebd504a80bba553bd534e62be38
Signed-off-by: Ackerley Tng <ackerleytng@google.com>
---
.../kvm/guest_memfd_conversions_test.c | 171 ++++++++++--------
1 file changed, 98 insertions(+), 73 deletions(-)
diff --git a/tools/testing/selftests/kvm/guest_memfd_conversions_test.c b/tools/testing/selftests/kvm/guest_memfd_conversions_test.c
index 34eb6c9a37b1..22126454fd6b 100644
--- a/tools/testing/selftests/kvm/guest_memfd_conversions_test.c
+++ b/tools/testing/selftests/kvm/guest_memfd_conversions_test.c
@@ -5,6 +5,7 @@
* Copyright (c) 2024, Google LLC.
*/
#include <linux/kvm.h>
+#include <linux/sizes.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
@@ -228,6 +229,11 @@ static struct kvm_vm *setup_test(size_t test_page_size, bool init_private,
if (init_private)
flags |= GUEST_MEMFD_FLAG_INIT_PRIVATE;
+ if (test_page_size == SZ_2M)
+ flags |= GUEST_MEMFD_FLAG_HUGETLB | GUESTMEM_HUGETLB_FLAG_2MB;
+ else if (test_page_size == SZ_1G)
+ flags |= GUEST_MEMFD_FLAG_HUGETLB | GUESTMEM_HUGETLB_FLAG_1GB;
+
*guest_memfd = vm_create_guest_memfd(vm, test_page_size, flags);
TEST_ASSERT(*guest_memfd > 0, "guest_memfd creation failed");
@@ -249,79 +255,80 @@ static void cleanup_test(size_t guest_memfd_size, struct kvm_vm *vm,
TEST_ASSERT_EQ(close(guest_memfd), 0);
}
-static void test_sharing(void)
+static void test_sharing(size_t test_page_size)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
int guest_memfd;
char *mem;
- vm = setup_test(PAGE_SIZE, /*init_private=*/false, &vcpu, &guest_memfd, &mem);
+ vm = setup_test(test_page_size, /*init_private=*/false, &vcpu, &guest_memfd, &mem);
host_use_memory(mem, 'X', 'A');
guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA, 'A', 'B', 0);
/* Toggle private flag of memory attributes and run the test again. */
- guest_memfd_convert_private(guest_memfd, 0, PAGE_SIZE);
+ guest_memfd_convert_private(guest_memfd, 0, test_page_size);
assert_host_cannot_fault(mem);
guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA, 'B', 'C', 0);
- guest_memfd_convert_shared(guest_memfd, 0, PAGE_SIZE);
+ guest_memfd_convert_shared(guest_memfd, 0, test_page_size);
host_use_memory(mem, 'C', 'D');
guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA, 'D', 'E', 0);
- cleanup_test(PAGE_SIZE, vm, guest_memfd, mem);
+ cleanup_test(test_page_size, vm, guest_memfd, mem);
}
-static void test_init_mappable_false(void)
+static void test_init_mappable_false(size_t test_page_size)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
int guest_memfd;
char *mem;
- vm = setup_test(PAGE_SIZE, /*init_private=*/true, &vcpu, &guest_memfd, &mem);
+ vm = setup_test(test_page_size, /*init_private=*/true, &vcpu, &guest_memfd, &mem);
assert_host_cannot_fault(mem);
guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA, 'X', 'A', 0);
- guest_memfd_convert_shared(guest_memfd, 0, PAGE_SIZE);
+ guest_memfd_convert_shared(guest_memfd, 0, test_page_size);
host_use_memory(mem, 'A', 'B');
guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA, 'B', 'C', 0);
- cleanup_test(PAGE_SIZE, vm, guest_memfd, mem);
+ cleanup_test(test_page_size, vm, guest_memfd, mem);
}
/*
* Test that even if there are no folios yet, conversion requests are recorded
* in guest_memfd.
*/
-static void test_conversion_before_allocation(void)
+static void test_conversion_before_allocation(size_t test_page_size)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
int guest_memfd;
char *mem;
- vm = setup_test(PAGE_SIZE, /*init_private=*/false, &vcpu, &guest_memfd, &mem);
+ vm = setup_test(test_page_size, /*init_private=*/false, &vcpu, &guest_memfd, &mem);
- guest_memfd_convert_private(guest_memfd, 0, PAGE_SIZE);
+ guest_memfd_convert_private(guest_memfd, 0, test_page_size);
assert_host_cannot_fault(mem);
guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA, 'X', 'A', 0);
- guest_memfd_convert_shared(guest_memfd, 0, PAGE_SIZE);
+ guest_memfd_convert_shared(guest_memfd, 0, test_page_size);
host_use_memory(mem, 'A', 'B');
guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA, 'B', 'C', 0);
- cleanup_test(PAGE_SIZE, vm, guest_memfd, mem);
+ cleanup_test(test_page_size, vm, guest_memfd, mem);
}
-static void __test_conversion_if_not_all_folios_allocated(int total_nr_pages,
+static void __test_conversion_if_not_all_folios_allocated(size_t test_page_size,
+ int total_nr_pages,
int page_to_fault)
{
const int second_page_to_fault = 8;
@@ -332,15 +339,15 @@ static void __test_conversion_if_not_all_folios_allocated(int total_nr_pages,
char *mem;
int i;
- total_size = PAGE_SIZE * total_nr_pages;
+ total_size = test_page_size * total_nr_pages;
vm = setup_test(total_size, /*init_private=*/false, &vcpu, &guest_memfd, &mem);
/*
* Fault 2 of the pages to test filemap range operations except when
* page_to_fault == second_page_to_fault.
*/
- host_use_memory(mem + page_to_fault * PAGE_SIZE, 'X', 'A');
- host_use_memory(mem + second_page_to_fault * PAGE_SIZE, 'X', 'A');
+ host_use_memory(mem + page_to_fault * test_page_size, 'X', 'A');
+ host_use_memory(mem + second_page_to_fault * test_page_size, 'X', 'A');
guest_memfd_convert_private(guest_memfd, 0, total_size);
@@ -348,37 +355,37 @@ static void __test_conversion_if_not_all_folios_allocated(int total_nr_pages,
bool is_faulted;
char expected;
- assert_host_cannot_fault(mem + i * PAGE_SIZE);
+ assert_host_cannot_fault(mem + i * test_page_size);
is_faulted = i == page_to_fault || i == second_page_to_fault;
expected = is_faulted ? 'A' : 'X';
guest_use_memory(vcpu,
- GUEST_MEMFD_SHARING_TEST_GVA + i * PAGE_SIZE,
+ GUEST_MEMFD_SHARING_TEST_GVA + i * test_page_size,
expected, 'B', 0);
}
guest_memfd_convert_shared(guest_memfd, 0, total_size);
for (i = 0; i < total_nr_pages; ++i) {
- host_use_memory(mem + i * PAGE_SIZE, 'B', 'C');
+ host_use_memory(mem + i * test_page_size, 'B', 'C');
guest_use_memory(vcpu,
- GUEST_MEMFD_SHARING_TEST_GVA + i * PAGE_SIZE,
+ GUEST_MEMFD_SHARING_TEST_GVA + i * test_page_size,
'C', 'D', 0);
}
cleanup_test(total_size, vm, guest_memfd, mem);
}
-static void test_conversion_if_not_all_folios_allocated(void)
+static void test_conversion_if_not_all_folios_allocated(size_t test_page_size)
{
const int total_nr_pages = 16;
int i;
for (i = 0; i < total_nr_pages; ++i)
- __test_conversion_if_not_all_folios_allocated(total_nr_pages, i);
+ __test_conversion_if_not_all_folios_allocated(test_page_size, total_nr_pages, i);
}
-static void test_conversions_should_not_affect_surrounding_pages(void)
+static void test_conversions_should_not_affect_surrounding_pages(size_t test_page_size)
{
struct kvm_vcpu *vcpu;
int page_to_convert;
@@ -391,40 +398,40 @@ static void test_conversions_should_not_affect_surrounding_pages(void)
page_to_convert = 2;
nr_pages = 4;
- total_size = PAGE_SIZE * nr_pages;
+ total_size = test_page_size * nr_pages;
vm = setup_test(total_size, /*init_private=*/false, &vcpu, &guest_memfd, &mem);
for (i = 0; i < nr_pages; ++i) {
- host_use_memory(mem + i * PAGE_SIZE, 'X', 'A');
- guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA + i * PAGE_SIZE,
+ host_use_memory(mem + i * test_page_size, 'X', 'A');
+ guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA + i * test_page_size,
'A', 'B', 0);
}
- guest_memfd_convert_private(guest_memfd, PAGE_SIZE * page_to_convert, PAGE_SIZE);
+ guest_memfd_convert_private(guest_memfd, test_page_size * page_to_convert, test_page_size);
for (i = 0; i < nr_pages; ++i) {
char to_check;
if (i == page_to_convert) {
- assert_host_cannot_fault(mem + i * PAGE_SIZE);
+ assert_host_cannot_fault(mem + i * test_page_size);
to_check = 'B';
} else {
- host_use_memory(mem + i * PAGE_SIZE, 'B', 'C');
+ host_use_memory(mem + i * test_page_size, 'B', 'C');
to_check = 'C';
}
- guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA + i * PAGE_SIZE,
+ guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA + i * test_page_size,
to_check, 'D', 0);
}
- guest_memfd_convert_shared(guest_memfd, PAGE_SIZE * page_to_convert, PAGE_SIZE);
+ guest_memfd_convert_shared(guest_memfd, test_page_size * page_to_convert, test_page_size);
for (i = 0; i < nr_pages; ++i) {
- host_use_memory(mem + i * PAGE_SIZE, 'D', 'E');
- guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA + i * PAGE_SIZE,
+ host_use_memory(mem + i * test_page_size, 'D', 'E');
+ guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA + i * test_page_size,
'E', 'F', 0);
}
@@ -432,7 +439,7 @@ static void test_conversions_should_not_affect_surrounding_pages(void)
}
static void __test_conversions_should_fail_if_memory_has_elevated_refcount(
- int nr_pages, int page_to_convert)
+ size_t test_page_size, int nr_pages, int page_to_convert)
{
struct kvm_vcpu *vcpu;
loff_t error_offset;
@@ -443,50 +450,50 @@ static void __test_conversions_should_fail_if_memory_has_elevated_refcount(
int ret;
int i;
- total_size = PAGE_SIZE * nr_pages;
+ total_size = test_page_size * nr_pages;
vm = setup_test(total_size, /*init_private=*/false, &vcpu, &guest_memfd, &mem);
- pin_pages(mem + page_to_convert * PAGE_SIZE, PAGE_SIZE);
+ pin_pages(mem + page_to_convert * test_page_size, test_page_size);
for (i = 0; i < nr_pages; i++) {
- host_use_memory(mem + i * PAGE_SIZE, 'X', 'A');
- guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA + i * PAGE_SIZE,
+ host_use_memory(mem + i * test_page_size, 'X', 'A');
+ guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA + i * test_page_size,
'A', 'B', 0);
}
error_offset = 0;
- ret = __guest_memfd_convert_private(guest_memfd, page_to_convert * PAGE_SIZE,
- PAGE_SIZE, &error_offset);
+ ret = __guest_memfd_convert_private(guest_memfd, page_to_convert * test_page_size,
+ test_page_size, &error_offset);
TEST_ASSERT_EQ(ret, -1);
TEST_ASSERT_EQ(errno, EAGAIN);
- TEST_ASSERT_EQ(error_offset, page_to_convert * PAGE_SIZE);
+ TEST_ASSERT_EQ(error_offset, page_to_convert * test_page_size);
unpin_pages();
- guest_memfd_convert_private(guest_memfd, page_to_convert * PAGE_SIZE, PAGE_SIZE);
+ guest_memfd_convert_private(guest_memfd, page_to_convert * test_page_size, test_page_size);
for (i = 0; i < nr_pages; i++) {
char expected;
if (i == page_to_convert)
- assert_host_cannot_fault(mem + i * PAGE_SIZE);
+ assert_host_cannot_fault(mem + i * test_page_size);
else
- host_use_memory(mem + i * PAGE_SIZE, 'B', 'C');
+ host_use_memory(mem + i * test_page_size, 'B', 'C');
expected = i == page_to_convert ? 'X' : 'C';
- guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA + i * PAGE_SIZE,
+ guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA + i * test_page_size,
expected, 'D', 0);
}
- guest_memfd_convert_shared(guest_memfd, page_to_convert * PAGE_SIZE, PAGE_SIZE);
+ guest_memfd_convert_shared(guest_memfd, page_to_convert * test_page_size, test_page_size);
for (i = 0; i < nr_pages; i++) {
char expected = i == page_to_convert ? 'X' : 'D';
- host_use_memory(mem + i * PAGE_SIZE, expected, 'E');
+ host_use_memory(mem + i * test_page_size, expected, 'E');
guest_use_memory(vcpu,
- GUEST_MEMFD_SHARING_TEST_GVA + i * PAGE_SIZE,
+ GUEST_MEMFD_SHARING_TEST_GVA + i * test_page_size,
'E', 'F', 0);
}
@@ -496,15 +503,18 @@ static void __test_conversions_should_fail_if_memory_has_elevated_refcount(
* This test depends on CONFIG_GUP_TEST to provide a kernel module that exposes
* pin_user_pages() to userspace.
*/
-static void test_conversions_should_fail_if_memory_has_elevated_refcount(void)
+static void test_conversions_should_fail_if_memory_has_elevated_refcount(
+ size_t test_page_size)
{
int i;
- for (i = 0; i < 4; i++)
- __test_conversions_should_fail_if_memory_has_elevated_refcount(4, i);
+ for (i = 0; i < 4; i++) {
+ __test_conversions_should_fail_if_memory_has_elevated_refcount(
+ test_page_size, 4, i);
+ }
}
-static void test_truncate_should_not_change_mappability(void)
+static void test_truncate_should_not_change_mappability(size_t test_page_size)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
@@ -512,40 +522,40 @@ static void test_truncate_should_not_change_mappability(void)
char *mem;
int ret;
- vm = setup_test(PAGE_SIZE, /*init_private=*/false, &vcpu, &guest_memfd, &mem);
+ vm = setup_test(test_page_size, /*init_private=*/false, &vcpu, &guest_memfd, &mem);
host_use_memory(mem, 'X', 'A');
ret = fallocate(guest_memfd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
- 0, PAGE_SIZE);
+ 0, test_page_size);
TEST_ASSERT(!ret, "truncating the first page should succeed");
host_use_memory(mem, 'X', 'A');
- guest_memfd_convert_private(guest_memfd, 0, PAGE_SIZE);
+ guest_memfd_convert_private(guest_memfd, 0, test_page_size);
assert_host_cannot_fault(mem);
guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA, 'A', 'A', 0);
ret = fallocate(guest_memfd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
- 0, PAGE_SIZE);
+ 0, test_page_size);
TEST_ASSERT(!ret, "truncating the first page should succeed");
assert_host_cannot_fault(mem);
guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA, 'X', 'A', 0);
- cleanup_test(PAGE_SIZE, vm, guest_memfd, mem);
+ cleanup_test(test_page_size, vm, guest_memfd, mem);
}
-static void test_fault_type_independent_of_mem_attributes(void)
+static void test_fault_type_independent_of_mem_attributes(size_t test_page_size)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
int guest_memfd;
char *mem;
- vm = setup_test(PAGE_SIZE, /*init_private=*/true, &vcpu, &guest_memfd, &mem);
- vm_mem_set_shared(vm, GUEST_MEMFD_SHARING_TEST_GPA, PAGE_SIZE);
+ vm = setup_test(test_page_size, /*init_private=*/true, &vcpu, &guest_memfd, &mem);
+ vm_mem_set_shared(vm, GUEST_MEMFD_SHARING_TEST_GPA, test_page_size);
/*
* kvm->mem_attr_array set to shared, guest_memfd memory initialized as
@@ -558,8 +568,8 @@ static void test_fault_type_independent_of_mem_attributes(void)
/* Guest can fault and use memory. */
guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA, 'X', 'A', 0);
- guest_memfd_convert_shared(guest_memfd, 0, PAGE_SIZE);
- vm_mem_set_private(vm, GUEST_MEMFD_SHARING_TEST_GPA, PAGE_SIZE);
+ guest_memfd_convert_shared(guest_memfd, 0, test_page_size);
+ vm_mem_set_private(vm, GUEST_MEMFD_SHARING_TEST_GPA, test_page_size);
/* Host can use shared memory. */
host_use_memory(mem, 'X', 'A');
@@ -567,7 +577,19 @@ static void test_fault_type_independent_of_mem_attributes(void)
/* Guest can also use shared memory. */
guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA, 'X', 'A', 0);
- cleanup_test(PAGE_SIZE, vm, guest_memfd, mem);
+ cleanup_test(test_page_size, vm, guest_memfd, mem);
+}
+
+static void test_with_size(size_t test_page_size)
+{
+ test_sharing(test_page_size);
+ test_init_mappable_false(test_page_size);
+ test_conversion_before_allocation(test_page_size);
+ test_conversion_if_not_all_folios_allocated(test_page_size);
+ test_conversions_should_not_affect_surrounding_pages(test_page_size);
+ test_truncate_should_not_change_mappability(test_page_size);
+ test_conversions_should_fail_if_memory_has_elevated_refcount(test_page_size);
+ test_fault_type_independent_of_mem_attributes(test_page_size);
}
int main(int argc, char *argv[])
@@ -576,14 +598,17 @@ int main(int argc, char *argv[])
TEST_REQUIRE(kvm_check_cap(KVM_CAP_GMEM_SHARED_MEM));
TEST_REQUIRE(kvm_check_cap(KVM_CAP_GMEM_CONVERSION));
- test_sharing();
- test_init_mappable_false();
- test_conversion_before_allocation();
- test_conversion_if_not_all_folios_allocated();
- test_conversions_should_not_affect_surrounding_pages();
- test_truncate_should_not_change_mappability();
- test_conversions_should_fail_if_memory_has_elevated_refcount();
- test_fault_type_independent_of_mem_attributes();
+ printf("Test guest_memfd with 4K pages\n");
+ test_with_size(PAGE_SIZE);
+ printf("\tPASSED\n");
+
+ printf("Test guest_memfd with 2M pages\n");
+ test_with_size(SZ_2M);
+ printf("\tPASSED\n");
+
+ printf("Test guest_memfd with 1G pages\n");
+ test_with_size(SZ_1G);
+ printf("\tPASSED\n");
return 0;
}
--
2.49.0.1045.g170613ef41-goog
next prev parent reply other threads:[~2025-05-14 23:44 UTC|newest]
Thread overview: 231+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-05-14 23:41 [RFC PATCH v2 00/51] 1G page support for guest_memfd Ackerley Tng
2025-05-14 23:41 ` [RFC PATCH v2 01/51] KVM: guest_memfd: Make guest mem use guest mem inodes instead of anonymous inodes Ackerley Tng
2025-05-14 23:41 ` [RFC PATCH v2 02/51] KVM: guest_memfd: Introduce and use shareability to guard faulting Ackerley Tng
2025-05-27 3:54 ` Yan Zhao
2025-05-29 18:20 ` Ackerley Tng
2025-05-30 8:53 ` Fuad Tabba
2025-05-30 18:32 ` Ackerley Tng
2025-06-02 9:43 ` Fuad Tabba
2025-05-27 8:25 ` Binbin Wu
2025-05-27 8:43 ` Binbin Wu
2025-05-29 18:26 ` Ackerley Tng
2025-05-29 20:37 ` Ackerley Tng
2025-05-29 5:42 ` Michael Roth
2025-06-11 21:51 ` Ackerley Tng
2025-07-02 23:25 ` Michael Roth
2025-07-03 0:46 ` Vishal Annapurve
2025-07-03 0:52 ` Vishal Annapurve
2025-07-03 4:12 ` Michael Roth
2025-07-03 5:10 ` Vishal Annapurve
2025-07-03 20:39 ` Michael Roth
2025-07-07 14:55 ` Vishal Annapurve
2025-07-12 0:10 ` Michael Roth
2025-07-12 17:53 ` Vishal Annapurve
2025-08-12 8:23 ` Fuad Tabba
2025-08-13 17:11 ` Ira Weiny
2025-06-11 22:10 ` Ackerley Tng
2025-08-01 0:01 ` Yan Zhao
2025-08-14 21:35 ` Ackerley Tng
2025-05-14 23:41 ` [RFC PATCH v2 03/51] KVM: selftests: Update guest_memfd_test for INIT_PRIVATE flag Ackerley Tng
2025-05-15 13:49 ` Ira Weiny
2025-05-16 17:42 ` Ackerley Tng
2025-05-16 19:31 ` Ira Weiny
2025-05-27 8:53 ` Binbin Wu
2025-05-30 19:59 ` Ackerley Tng
2025-05-14 23:41 ` [RFC PATCH v2 04/51] KVM: guest_memfd: Introduce KVM_GMEM_CONVERT_SHARED/PRIVATE ioctls Ackerley Tng
2025-05-15 14:50 ` Ira Weiny
2025-05-16 17:53 ` Ackerley Tng
2025-05-20 9:22 ` Fuad Tabba
2025-05-20 13:02 ` Vishal Annapurve
2025-05-20 13:44 ` Fuad Tabba
2025-05-20 14:11 ` Vishal Annapurve
2025-05-20 14:33 ` Fuad Tabba
2025-05-20 16:02 ` Vishal Annapurve
2025-05-20 18:05 ` Fuad Tabba
2025-05-20 19:40 ` Ackerley Tng
2025-05-21 12:36 ` Fuad Tabba
2025-05-21 14:42 ` Vishal Annapurve
2025-05-21 15:21 ` Fuad Tabba
2025-05-21 15:51 ` Vishal Annapurve
2025-05-21 18:27 ` Fuad Tabba
2025-05-22 14:52 ` Sean Christopherson
2025-05-22 15:07 ` Fuad Tabba
2025-05-22 16:26 ` Sean Christopherson
2025-05-23 10:12 ` Fuad Tabba
2025-06-24 8:23 ` Alexey Kardashevskiy
2025-06-24 13:08 ` Jason Gunthorpe
2025-06-24 14:10 ` Vishal Annapurve
2025-06-27 4:49 ` Alexey Kardashevskiy
2025-06-27 15:17 ` Vishal Annapurve
2025-06-30 0:19 ` Alexey Kardashevskiy
2025-06-30 14:19 ` Vishal Annapurve
2025-07-10 6:57 ` Alexey Kardashevskiy
2025-07-10 17:58 ` Jason Gunthorpe
2025-07-02 8:35 ` Yan Zhao
2025-07-02 13:54 ` Vishal Annapurve
2025-07-02 14:13 ` Jason Gunthorpe
2025-07-02 14:32 ` Vishal Annapurve
2025-07-10 10:50 ` Xu Yilun
2025-07-10 17:54 ` Jason Gunthorpe
2025-07-11 4:31 ` Xu Yilun
2025-07-11 9:33 ` Xu Yilun
2025-07-16 22:22 ` Ackerley Tng
2025-07-17 9:32 ` Xu Yilun
2025-07-17 16:56 ` Ackerley Tng
2025-07-18 2:48 ` Xu Yilun
2025-07-18 14:15 ` Jason Gunthorpe
2025-07-21 14:18 ` Xu Yilun
2025-07-18 15:13 ` Ira Weiny
2025-07-21 9:58 ` Xu Yilun
2025-07-22 18:17 ` Ackerley Tng
2025-07-22 19:25 ` Edgecombe, Rick P
2025-05-28 3:16 ` Binbin Wu
2025-05-30 20:10 ` Ackerley Tng
2025-06-03 0:54 ` Binbin Wu
2025-05-14 23:41 ` [RFC PATCH v2 05/51] KVM: guest_memfd: Skip LRU for guest_memfd folios Ackerley Tng
2025-05-28 7:01 ` Binbin Wu
2025-05-30 20:32 ` Ackerley Tng
2025-05-14 23:41 ` [RFC PATCH v2 06/51] KVM: Query guest_memfd for private/shared status Ackerley Tng
2025-05-27 3:55 ` Yan Zhao
2025-05-28 8:08 ` Binbin Wu
2025-05-28 9:55 ` Yan Zhao
2025-05-14 23:41 ` [RFC PATCH v2 07/51] KVM: guest_memfd: Add CAP KVM_CAP_GMEM_CONVERSION Ackerley Tng
2025-05-14 23:41 ` [RFC PATCH v2 08/51] KVM: selftests: Test flag validity after guest_memfd supports conversions Ackerley Tng
2025-05-14 23:41 ` [RFC PATCH v2 09/51] KVM: selftests: Test faulting with respect to GUEST_MEMFD_FLAG_INIT_PRIVATE Ackerley Tng
2025-05-14 23:41 ` [RFC PATCH v2 10/51] KVM: selftests: Refactor vm_mem_add to be more flexible Ackerley Tng
2025-05-14 23:41 ` [RFC PATCH v2 11/51] KVM: selftests: Allow cleanup of ucall_pool from host Ackerley Tng
2025-05-14 23:41 ` [RFC PATCH v2 12/51] KVM: selftests: Test conversion flows for guest_memfd Ackerley Tng
2025-05-14 23:41 ` [RFC PATCH v2 13/51] KVM: selftests: Add script to exercise private_mem_conversions_test Ackerley Tng
2025-05-14 23:41 ` [RFC PATCH v2 14/51] KVM: selftests: Update private_mem_conversions_test to mmap guest_memfd Ackerley Tng
2025-05-14 23:41 ` [RFC PATCH v2 15/51] KVM: selftests: Update script to map shared memory from guest_memfd Ackerley Tng
2025-05-14 23:41 ` [RFC PATCH v2 16/51] mm: hugetlb: Consolidate interpretation of gbl_chg within alloc_hugetlb_folio() Ackerley Tng
2025-05-15 2:09 ` Matthew Wilcox
2025-05-28 8:55 ` Binbin Wu
2025-07-07 18:27 ` James Houghton
2025-05-14 23:41 ` [RFC PATCH v2 17/51] mm: hugetlb: Cleanup interpretation of gbl_chg in alloc_hugetlb_folio() Ackerley Tng
2025-05-14 23:41 ` [RFC PATCH v2 18/51] mm: hugetlb: Cleanup interpretation of map_chg_state within alloc_hugetlb_folio() Ackerley Tng
2025-07-07 18:08 ` James Houghton
2025-05-14 23:41 ` [RFC PATCH v2 19/51] mm: hugetlb: Rename alloc_surplus_hugetlb_folio Ackerley Tng
2025-05-14 23:41 ` [RFC PATCH v2 20/51] mm: mempolicy: Refactor out policy_node_nodemask() Ackerley Tng
2025-05-14 23:42 ` [RFC PATCH v2 21/51] mm: hugetlb: Inline huge_node() into callers Ackerley Tng
2025-05-14 23:42 ` [RFC PATCH v2 22/51] mm: hugetlb: Refactor hugetlb allocation functions Ackerley Tng
2025-05-31 23:45 ` Ira Weiny
2025-06-13 22:03 ` Ackerley Tng
2025-05-14 23:42 ` [RFC PATCH v2 23/51] mm: hugetlb: Refactor out hugetlb_alloc_folio() Ackerley Tng
2025-06-01 0:38 ` Ira Weiny
2025-06-13 22:07 ` Ackerley Tng
2025-05-14 23:42 ` [RFC PATCH v2 24/51] mm: hugetlb: Add option to create new subpool without using surplus Ackerley Tng
2025-05-14 23:42 ` [RFC PATCH v2 25/51] mm: truncate: Expose preparation steps for truncate_inode_pages_final Ackerley Tng
2025-05-14 23:42 ` [RFC PATCH v2 26/51] mm: Consolidate freeing of typed folios on final folio_put() Ackerley Tng
2025-05-14 23:42 ` [RFC PATCH v2 27/51] mm: hugetlb: Expose hugetlb_subpool_{get,put}_pages() Ackerley Tng
2025-05-14 23:42 ` [RFC PATCH v2 28/51] mm: Introduce guestmem_hugetlb to support folio_put() handling of guestmem pages Ackerley Tng
2025-05-14 23:42 ` [RFC PATCH v2 29/51] mm: guestmem_hugetlb: Wrap HugeTLB as an allocator for guest_memfd Ackerley Tng
2025-05-16 14:07 ` Ackerley Tng
2025-05-16 20:33 ` Ackerley Tng
2025-05-14 23:42 ` [RFC PATCH v2 30/51] mm: truncate: Expose truncate_inode_folio() Ackerley Tng
2025-05-14 23:42 ` [RFC PATCH v2 31/51] KVM: x86: Set disallow_lpage on base_gfn and guest_memfd pgoff misalignment Ackerley Tng
2025-05-14 23:42 ` [RFC PATCH v2 32/51] KVM: guest_memfd: Support guestmem_hugetlb as custom allocator Ackerley Tng
2025-05-23 10:47 ` Yan Zhao
2025-08-12 9:13 ` Tony Lindgren
2025-05-14 23:42 ` [RFC PATCH v2 33/51] KVM: guest_memfd: Allocate and truncate from " Ackerley Tng
2025-05-21 18:05 ` Vishal Annapurve
2025-05-22 23:12 ` Edgecombe, Rick P
2025-05-28 10:58 ` Yan Zhao
2025-06-03 7:43 ` Binbin Wu
2025-07-16 22:13 ` Ackerley Tng
2025-05-14 23:42 ` [RFC PATCH v2 34/51] mm: hugetlb: Add functions to add/delete folio from hugetlb lists Ackerley Tng
2025-05-14 23:42 ` [RFC PATCH v2 35/51] mm: guestmem_hugetlb: Add support for splitting and merging pages Ackerley Tng
2025-05-14 23:42 ` [RFC PATCH v2 36/51] mm: Convert split_folio() macro to function Ackerley Tng
2025-05-21 16:40 ` Edgecombe, Rick P
2025-05-14 23:42 ` [RFC PATCH v2 37/51] filemap: Pass address_space mapping to ->free_folio() Ackerley Tng
2025-05-14 23:42 ` [RFC PATCH v2 38/51] KVM: guest_memfd: Split allocator pages for guest_memfd use Ackerley Tng
2025-05-22 22:19 ` Edgecombe, Rick P
2025-06-05 17:15 ` Ackerley Tng
2025-06-05 17:53 ` Edgecombe, Rick P
2025-06-05 17:15 ` Ackerley Tng
2025-06-05 17:16 ` Ackerley Tng
2025-06-05 17:16 ` Ackerley Tng
2025-06-05 17:16 ` Ackerley Tng
2025-05-27 4:30 ` Yan Zhao
2025-05-27 4:38 ` Yan Zhao
2025-06-05 17:50 ` Ackerley Tng
2025-05-27 8:45 ` Yan Zhao
2025-06-05 19:10 ` Ackerley Tng
2025-06-16 11:15 ` Yan Zhao
2025-06-05 5:24 ` Binbin Wu
2025-06-05 19:16 ` Ackerley Tng
2025-05-14 23:42 ` [RFC PATCH v2 39/51] KVM: guest_memfd: Merge and truncate on fallocate(PUNCH_HOLE) Ackerley Tng
2025-05-28 11:00 ` Yan Zhao
2025-05-28 16:39 ` Ackerley Tng
2025-05-29 3:26 ` Yan Zhao
2025-05-14 23:42 ` [RFC PATCH v2 40/51] KVM: guest_memfd: Update kvm_gmem_mapping_order to account for page status Ackerley Tng
2025-05-14 23:42 ` [RFC PATCH v2 41/51] KVM: Add CAP to indicate support for HugeTLB as custom allocator Ackerley Tng
2025-05-14 23:42 ` [RFC PATCH v2 42/51] KVM: selftests: Add basic selftests for hugetlb-backed guest_memfd Ackerley Tng
2025-05-14 23:42 ` Ackerley Tng [this message]
2025-05-14 23:42 ` [RFC PATCH v2 44/51] KVM: selftests: Test truncation paths of guest_memfd Ackerley Tng
2025-05-14 23:42 ` [RFC PATCH v2 45/51] KVM: selftests: Test allocation and conversion of subfolios Ackerley Tng
2025-05-14 23:42 ` [RFC PATCH v2 46/51] KVM: selftests: Test that guest_memfd usage is reported via hugetlb Ackerley Tng
2025-05-14 23:42 ` [RFC PATCH v2 47/51] KVM: selftests: Support various types of backing sources for private memory Ackerley Tng
2025-05-14 23:42 ` [RFC PATCH v2 48/51] KVM: selftests: Update test for various private memory backing source types Ackerley Tng
2025-05-14 23:42 ` [RFC PATCH v2 49/51] KVM: selftests: Update private_mem_conversions_test.sh to test with HugeTLB pages Ackerley Tng
2025-05-14 23:42 ` [RFC PATCH v2 50/51] KVM: selftests: Add script to test HugeTLB statistics Ackerley Tng
2025-05-15 18:03 ` [RFC PATCH v2 00/51] 1G page support for guest_memfd Edgecombe, Rick P
2025-05-15 18:42 ` Vishal Annapurve
2025-05-15 23:35 ` Edgecombe, Rick P
2025-05-16 0:57 ` Sean Christopherson
2025-05-16 2:12 ` Edgecombe, Rick P
2025-05-16 13:11 ` Vishal Annapurve
2025-05-16 16:45 ` Edgecombe, Rick P
2025-05-16 17:51 ` Sean Christopherson
2025-05-16 19:14 ` Edgecombe, Rick P
2025-05-16 20:25 ` Dave Hansen
2025-05-16 21:42 ` Edgecombe, Rick P
2025-05-16 17:45 ` Sean Christopherson
2025-05-16 13:09 ` Jason Gunthorpe
2025-05-16 17:04 ` Edgecombe, Rick P
2025-05-16 0:22 ` [RFC PATCH v2 51/51] KVM: selftests: Test guest_memfd for accuracy of st_blocks Ackerley Tng
2025-05-16 19:48 ` [RFC PATCH v2 00/51] 1G page support for guest_memfd Ira Weiny
2025-05-16 19:59 ` Ira Weiny
2025-05-16 20:26 ` Ackerley Tng
2025-05-16 22:43 ` Ackerley Tng
2025-06-19 8:13 ` Yan Zhao
2025-06-19 8:59 ` Xiaoyao Li
2025-06-19 9:18 ` Xiaoyao Li
2025-06-19 9:28 ` Yan Zhao
2025-06-19 9:45 ` Xiaoyao Li
2025-06-19 9:49 ` Xiaoyao Li
2025-06-29 18:28 ` Vishal Annapurve
2025-06-30 3:14 ` Yan Zhao
2025-06-30 14:14 ` Vishal Annapurve
2025-07-01 5:23 ` Yan Zhao
2025-07-01 19:48 ` Vishal Annapurve
2025-07-07 23:25 ` Sean Christopherson
2025-07-08 0:14 ` Vishal Annapurve
2025-07-08 1:08 ` Edgecombe, Rick P
2025-07-08 14:20 ` Sean Christopherson
2025-07-08 14:52 ` Edgecombe, Rick P
2025-07-08 15:07 ` Vishal Annapurve
2025-07-08 15:31 ` Edgecombe, Rick P
2025-07-08 17:16 ` Vishal Annapurve
2025-07-08 17:39 ` Edgecombe, Rick P
2025-07-08 18:03 ` Sean Christopherson
2025-07-08 18:13 ` Edgecombe, Rick P
2025-07-08 18:55 ` Sean Christopherson
2025-07-08 21:23 ` Edgecombe, Rick P
2025-07-09 14:28 ` Vishal Annapurve
2025-07-09 15:00 ` Sean Christopherson
2025-07-10 1:30 ` Vishal Annapurve
2025-07-10 23:33 ` Sean Christopherson
2025-07-11 21:18 ` Vishal Annapurve
2025-07-12 17:33 ` Vishal Annapurve
2025-07-09 15:17 ` Edgecombe, Rick P
2025-07-10 3:39 ` Vishal Annapurve
2025-07-08 19:28 ` Vishal Annapurve
2025-07-08 19:58 ` Sean Christopherson
2025-07-08 22:54 ` Vishal Annapurve
2025-07-08 15:38 ` Sean Christopherson
2025-07-08 16:22 ` Fuad Tabba
2025-07-08 17:25 ` Sean Christopherson
2025-07-08 18:37 ` Fuad Tabba
2025-07-16 23:06 ` Ackerley Tng
2025-06-26 23:19 ` Ackerley Tng
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=e232d7e84ebb1bffa323ec554474d0cf759e8bae.1747264138.git.ackerleytng@google.com \
--to=ackerleytng@google.com \
--cc=aik@amd.com \
--cc=ajones@ventanamicro.com \
--cc=akpm@linux-foundation.org \
--cc=amoorthy@google.com \
--cc=anthony.yznaga@oracle.com \
--cc=anup@brainfault.org \
--cc=aou@eecs.berkeley.edu \
--cc=bfoster@redhat.com \
--cc=binbin.wu@linux.intel.com \
--cc=brauner@kernel.org \
--cc=catalin.marinas@arm.com \
--cc=chao.p.peng@intel.com \
--cc=chenhuacai@kernel.org \
--cc=dave.hansen@intel.com \
--cc=david@redhat.com \
--cc=dmatlack@google.com \
--cc=dwmw@amazon.co.uk \
--cc=erdemaktas@google.com \
--cc=fan.du@intel.com \
--cc=fvdl@google.com \
--cc=graf@amazon.com \
--cc=haibo1.xu@intel.com \
--cc=hch@infradead.org \
--cc=hughd@google.com \
--cc=ira.weiny@intel.com \
--cc=isaku.yamahata@intel.com \
--cc=jack@suse.cz \
--cc=james.morse@arm.com \
--cc=jarkko@kernel.org \
--cc=jgg@ziepe.ca \
--cc=jgowans@amazon.com \
--cc=jhubbard@nvidia.com \
--cc=jroedel@suse.de \
--cc=jthoughton@google.com \
--cc=jun.miao@intel.com \
--cc=kai.huang@intel.com \
--cc=keirf@google.com \
--cc=kent.overstreet@linux.dev \
--cc=kirill.shutemov@intel.com \
--cc=kvm@vger.kernel.org \
--cc=liam.merwick@oracle.com \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=maciej.wieczor-retman@intel.com \
--cc=mail@maciej.szmigiero.name \
--cc=maz@kernel.org \
--cc=mic@digikod.net \
--cc=michael.roth@amd.com \
--cc=mpe@ellerman.id.au \
--cc=muchun.song@linux.dev \
--cc=nikunj@amd.com \
--cc=nsaenz@amazon.es \
--cc=oliver.upton@linux.dev \
--cc=palmer@dabbelt.com \
--cc=pankaj.gupta@amd.com \
--cc=paul.walmsley@sifive.com \
--cc=pbonzini@redhat.com \
--cc=pdurrant@amazon.co.uk \
--cc=peterx@redhat.com \
--cc=pgonda@google.com \
--cc=pvorel@suse.cz \
--cc=qperret@google.com \
--cc=quic_cvanscha@quicinc.com \
--cc=quic_eberman@quicinc.com \
--cc=quic_mnalajal@quicinc.com \
--cc=quic_pderrin@quicinc.com \
--cc=quic_pheragu@quicinc.com \
--cc=quic_svaddagi@quicinc.com \
--cc=quic_tsoni@quicinc.com \
--cc=richard.weiyang@gmail.com \
--cc=rick.p.edgecombe@intel.com \
--cc=rientjes@google.com \
--cc=roypat@amazon.co.uk \
--cc=rppt@kernel.org \
--cc=seanjc@google.com \
--cc=shuah@kernel.org \
--cc=steven.price@arm.com \
--cc=steven.sistare@oracle.com \
--cc=suzuki.poulose@arm.com \
--cc=tabba@google.com \
--cc=thomas.lendacky@amd.com \
--cc=usama.arif@bytedance.com \
--cc=vannapurve@google.com \
--cc=vbabka@suse.cz \
--cc=viro@zeniv.linux.org.uk \
--cc=vkuznets@redhat.com \
--cc=wei.w.wang@intel.com \
--cc=will@kernel.org \
--cc=willy@infradead.org \
--cc=x86@kernel.org \
--cc=xiaoyao.li@intel.com \
--cc=yan.y.zhao@intel.com \
--cc=yilun.xu@intel.com \
--cc=yuzenghui@huawei.com \
--cc=zhiquan1.li@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).