From: Ackerley Tng <ackerleytng@google.com>
To: kvm@vger.kernel.org, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org,  x86@kernel.org,
	linux-fsdevel@vger.kernel.org
Cc: ackerleytng@google.com, aik@amd.com, ajones@ventanamicro.com,
	 akpm@linux-foundation.org, amoorthy@google.com,
	anthony.yznaga@oracle.com,  anup@brainfault.org,
	aou@eecs.berkeley.edu, bfoster@redhat.com,
	 binbin.wu@linux.intel.com, brauner@kernel.org,
	catalin.marinas@arm.com,  chao.p.peng@intel.com,
	chenhuacai@kernel.org, dave.hansen@intel.com,  david@redhat.com,
	dmatlack@google.com, dwmw@amazon.co.uk,  erdemaktas@google.com,
	fan.du@intel.com, fvdl@google.com, graf@amazon.com,
	 haibo1.xu@intel.com, hch@infradead.org, hughd@google.com,
	ira.weiny@intel.com,  isaku.yamahata@intel.com, jack@suse.cz,
	james.morse@arm.com,  jarkko@kernel.org, jgg@ziepe.ca,
	jgowans@amazon.com, jhubbard@nvidia.com,  jroedel@suse.de,
	jthoughton@google.com, jun.miao@intel.com,  kai.huang@intel.com,
	keirf@google.com, kent.overstreet@linux.dev,
	 kirill.shutemov@intel.com, liam.merwick@oracle.com,
	 maciej.wieczor-retman@intel.com, mail@maciej.szmigiero.name,
	maz@kernel.org,  mic@digikod.net, michael.roth@amd.com,
	mpe@ellerman.id.au,  muchun.song@linux.dev, nikunj@amd.com,
	nsaenz@amazon.es,  oliver.upton@linux.dev, palmer@dabbelt.com,
	pankaj.gupta@amd.com,  paul.walmsley@sifive.com,
	pbonzini@redhat.com, pdurrant@amazon.co.uk,  peterx@redhat.com,
	pgonda@google.com, pvorel@suse.cz, qperret@google.com,
	 quic_cvanscha@quicinc.com, quic_eberman@quicinc.com,
	 quic_mnalajal@quicinc.com, quic_pderrin@quicinc.com,
	quic_pheragu@quicinc.com,  quic_svaddagi@quicinc.com,
	quic_tsoni@quicinc.com, richard.weiyang@gmail.com,
	 rick.p.edgecombe@intel.com, rientjes@google.com,
	roypat@amazon.co.uk,  rppt@kernel.org, seanjc@google.com,
	shuah@kernel.org, steven.price@arm.com,
	 steven.sistare@oracle.com, suzuki.poulose@arm.com,
	tabba@google.com,  thomas.lendacky@amd.com,
	usama.arif@bytedance.com, vannapurve@google.com,  vbabka@suse.cz,
	viro@zeniv.linux.org.uk, vkuznets@redhat.com,
	 wei.w.wang@intel.com, will@kernel.org, willy@infradead.org,
	 xiaoyao.li@intel.com, yan.y.zhao@intel.com, yilun.xu@intel.com,
	 yuzenghui@huawei.com, zhiquan1.li@intel.com
Subject: [RFC PATCH v2 12/51] KVM: selftests: Test conversion flows for guest_memfd
Date: Wed, 14 May 2025 16:41:51 -0700
Message-ID: <baa8838f623102931e755cf34c86314b305af49c.1747264138.git.ackerleytng@google.com>
In-Reply-To: <cover.1747264138.git.ackerleytng@google.com>

Add minimal tests for guest_memfd conversion flows: test that when
memory is marked shared in a VM, the host can read and write to it via
an mmap()ed address, and the guest can also read and write to it.

Tests added in this patch take extra refcounts on guest_memfd pages via
GUP (requiring CONFIG_GUP_TEST) to simulate unexpected refcounts, and
test that such unexpected refcounts cause conversions to fail.
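
As a condensed sketch, the elevated-refcount flow exercised below (using
pin_pages() and __guest_memfd_convert_private(), both added in this
patch) looks like:

	pin_pages(mem + offset, PAGE_SIZE);
	ret = __guest_memfd_convert_private(guest_memfd, offset, PAGE_SIZE,
					    &error_offset);
	/* The longterm pin holds an extra refcount: conversion must fail. */
	TEST_ASSERT_EQ(ret, -1);
	TEST_ASSERT_EQ(errno, EAGAIN);
	TEST_ASSERT_EQ(error_offset, offset);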

Change-Id: I4f8c05aa511bcb9a34921a54fc8315ed89629018
Signed-off-by: Ackerley Tng <ackerleytng@google.com>
---
 tools/testing/selftests/kvm/Makefile.kvm      |   1 +
 .../kvm/guest_memfd_conversions_test.c        | 589 ++++++++++++++++++
 .../testing/selftests/kvm/include/kvm_util.h  |  74 +++
 3 files changed, 664 insertions(+)
 create mode 100644 tools/testing/selftests/kvm/guest_memfd_conversions_test.c

diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm
index ccf95ed037c3..bc22a5a23c4c 100644
--- a/tools/testing/selftests/kvm/Makefile.kvm
+++ b/tools/testing/selftests/kvm/Makefile.kvm
@@ -131,6 +131,7 @@ TEST_GEN_PROGS_x86 += access_tracking_perf_test
 TEST_GEN_PROGS_x86 += coalesced_io_test
 TEST_GEN_PROGS_x86 += dirty_log_perf_test
 TEST_GEN_PROGS_x86 += guest_memfd_test
+TEST_GEN_PROGS_x86 += guest_memfd_conversions_test
 TEST_GEN_PROGS_x86 += hardware_disable_test
 TEST_GEN_PROGS_x86 += memslot_modification_stress_test
 TEST_GEN_PROGS_x86 += memslot_perf_test
diff --git a/tools/testing/selftests/kvm/guest_memfd_conversions_test.c b/tools/testing/selftests/kvm/guest_memfd_conversions_test.c
new file mode 100644
index 000000000000..34eb6c9a37b1
--- /dev/null
+++ b/tools/testing/selftests/kvm/guest_memfd_conversions_test.c
@@ -0,0 +1,589 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test conversion flows for guest_memfd.
+ *
+ * Copyright (c) 2024, Google LLC.
+ */
+#include <linux/kvm.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "kvm_util.h"
+#include "processor.h"
+#include "test_util.h"
+#include "ucall_common.h"
+#include "../../../../mm/gup_test.h"
+
+#define GUEST_MEMFD_SHARING_TEST_SLOT 10
+/*
+ * Use a high GPA, above APIC_DEFAULT_PHYS_BASE, to avoid clashing with
+ * the APIC page.
+ */
+#define GUEST_MEMFD_SHARING_TEST_GPA 0x100000000ULL
+#define GUEST_MEMFD_SHARING_TEST_GVA 0x90000000ULL
+
+static int gup_test_fd;
+
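+/*
+ * pin_pages()/unpin_pages() drive the CONFIG_GUP_TEST debugfs interface:
+ * PIN_LONGTERM_TEST_START takes a longterm GUP pin on the given range,
+ * elevating the page refcounts until PIN_LONGTERM_TEST_STOP is issued.
+ */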
+static void pin_pages(void *vaddr, uint64_t size)
+{
+	const struct pin_longterm_test args = {
+		.addr = (uint64_t)vaddr,
+		.size = size,
+		.flags = PIN_LONGTERM_TEST_FLAG_USE_WRITE,
+	};
+
+	gup_test_fd = open("/sys/kernel/debug/gup_test", O_RDWR);
+	TEST_REQUIRE(gup_test_fd > 0);
+
+	TEST_ASSERT_EQ(ioctl(gup_test_fd, PIN_LONGTERM_TEST_START, &args), 0);
+}
+
+static void unpin_pages(void)
+{
+	TEST_ASSERT_EQ(ioctl(gup_test_fd, PIN_LONGTERM_TEST_STOP), 0);
+}
+
+static void guest_check_mem(uint64_t gva, char expected_read_value, char write_value)
+{
+	char *mem = (char *)gva;
+
+	if (expected_read_value != 'X')
+		GUEST_ASSERT_EQ(*mem, expected_read_value);
+
+	if (write_value != 'X')
+		*mem = write_value;
+
+	GUEST_DONE();
+}
+
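+/*
+ * Run @vcpu, retrying on EINTR and servicing UCALL_PRINTF/UCALL_ABORT,
+ * so that callers only need to handle the final result of KVM_RUN.
+ */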
+static int vcpu_run_handle_basic_ucalls(struct kvm_vcpu *vcpu)
+{
+	struct ucall uc;
+	int rc;
+
+keep_going:
+	do {
+		rc = __vcpu_run(vcpu);
+	} while (rc == -1 && errno == EINTR);
+
+	switch (get_ucall(vcpu, &uc)) {
+	case UCALL_PRINTF:
+		REPORT_GUEST_PRINTF(uc);
+		goto keep_going;
+	case UCALL_ABORT:
+		REPORT_GUEST_ASSERT(uc);
+	}
+
+	return rc;
+}
+
+/**
+ * guest_use_memory() - Assert that guest can use memory at @gva.
+ *
+ * @vcpu: the vcpu to run this test on.
+ * @gva: the virtual address in the guest to try to use.
+ * @expected_read_value: the value that is expected at @gva. Set this to 'X' to
+ *                       skip checking current value.
+ * @write_value: value to write to @gva. Set to 'X' to skip writing a value
+ *               to @gva.
+ * @expected_errno: the errno expected while reading or writing @gva. Set to
+ *                  0 if no error is expected. If @expected_errno is set,
+ *                  'Z' is used in place of @expected_read_value and
+ *                  @write_value.
+ */
+static void guest_use_memory(struct kvm_vcpu *vcpu, uint64_t gva,
+			     char expected_read_value, char write_value,
+			     int expected_errno)
+{
+	struct kvm_regs original_regs;
+	int rc;
+
+	if (expected_errno > 0) {
+		expected_read_value = 'Z';
+		write_value = 'Z';
+	}
+
+	/*
+	 * Back up vCPU state so that guest_check_mem can be run again and
+	 * again.
+	 */
+	vcpu_regs_get(vcpu, &original_regs);
+
+	vcpu_args_set(vcpu, 3, gva, expected_read_value, write_value);
+	vcpu_arch_set_entry_point(vcpu, guest_check_mem);
+
+	rc = vcpu_run_handle_basic_ucalls(vcpu);
+
+	if (expected_errno) {
+		TEST_ASSERT_EQ(rc, -1);
+		TEST_ASSERT_EQ(errno, expected_errno);
+
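+		/*
+		 * On EFAULT, KVM is expected to exit without setting an exit
+		 * reason (exit_reason 0 is KVM_EXIT_UNKNOWN); EACCES is
+		 * expected to be paired with KVM_EXIT_MEMORY_FAULT.
+		 */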
+		switch (expected_errno) {
+		case EFAULT:
+			TEST_ASSERT_EQ(vcpu->run->exit_reason, 0);
+			break;
+		case EACCES:
+			TEST_ASSERT_EQ(vcpu->run->exit_reason, KVM_EXIT_MEMORY_FAULT);
+			break;
+		}
+	} else {
+		struct ucall uc;
+
+		TEST_ASSERT_EQ(rc, 0);
+		TEST_ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_DONE);
+
+		/*
+		 * UCALL_DONE() uses up one struct ucall slot. To reuse the slot
+		 * in another run of guest_check_mem, free up that slot.
+		 */
+		ucall_free((struct ucall *)uc.hva);
+	}
+
+	vcpu_regs_set(vcpu, &original_regs);
+}
+
+/**
+ * host_use_memory() - Assert that host can fault and use memory at @address.
+ *
+ * @address: the address to test.
+ * @expected_read_value: the value expected to be read from @address. Set to 'X'
+ *                       to skip checking current value at @address.
+ * @write_value: the value to write to @address. Set to 'X' to skip writing
+ *               value to @address.
+ */
+static void host_use_memory(char *address, char expected_read_value,
+			    char write_value)
+{
+	if (expected_read_value != 'X')
+		TEST_ASSERT_EQ(*address, expected_read_value);
+
+	if (write_value != 'X')
+		*address = write_value;
+}
+
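+/*
+ * Fault on @address from a forked child and assert that the child is
+ * killed by SIGBUS, i.e. that the host cannot fault the memory in.
+ */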
+static void assert_host_cannot_fault(char *address)
+{
+	pid_t child_pid;
+
+	child_pid = fork();
+	TEST_ASSERT(child_pid != -1, "fork failed");
+
+	if (child_pid == 0) {
+		*address = 'A';
+		TEST_FAIL("Child should have exited with a signal");
+	} else {
+		int status;
+
+		waitpid(child_pid, &status, 0);
+
+		TEST_ASSERT(WIFSIGNALED(status),
+			    "Child should have exited with a signal");
+		TEST_ASSERT_EQ(WTERMSIG(status), SIGBUS);
+	}
+}
+
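+/*
+ * Add a KVM_MEM_GUEST_MEMFD memslot backed by @guest_memfd and return the
+ * host's mmap()ed view of the guest_memfd.
+ */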
+static void *add_memslot(struct kvm_vm *vm, size_t memslot_size, int guest_memfd)
+{
+	struct userspace_mem_region *region;
+	void *mem;
+
+	TEST_REQUIRE(guest_memfd > 0);
+
+	region = vm_mem_region_alloc(vm);
+
+	guest_memfd = vm_mem_region_install_guest_memfd(region, guest_memfd);
+	mem = vm_mem_region_mmap(region, memslot_size, MAP_SHARED, guest_memfd, 0);
+	vm_mem_region_install_memory(region, memslot_size, PAGE_SIZE);
+
+	region->region.slot = GUEST_MEMFD_SHARING_TEST_SLOT;
+	region->region.flags = KVM_MEM_GUEST_MEMFD;
+	region->region.guest_phys_addr = GUEST_MEMFD_SHARING_TEST_GPA;
+	region->region.guest_memfd_offset = 0;
+
+	vm_mem_region_add(vm, region);
+
+	return mem;
+}
+
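+/*
+ * Create a KVM_X86_SW_PROTECTED_VM with a single vCPU and a guest_memfd
+ * that supports shared memory (optionally initialized as private), then
+ * map the memory for the host (via mmap()) and the guest (via virt_map()).
+ */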
+static struct kvm_vm *setup_test(size_t test_page_size, bool init_private,
+				 struct kvm_vcpu **vcpu, int *guest_memfd,
+				 char **mem)
+{
+	const struct vm_shape shape = {
+		.mode = VM_MODE_DEFAULT,
+		.type = KVM_X86_SW_PROTECTED_VM,
+	};
+	size_t test_nr_pages;
+	struct kvm_vm *vm;
+	uint64_t flags;
+
+	test_nr_pages = test_page_size / PAGE_SIZE;
+	vm = __vm_create_shape_with_one_vcpu(shape, vcpu, test_nr_pages, NULL);
+
+	flags = GUEST_MEMFD_FLAG_SUPPORT_SHARED;
+	if (init_private)
+		flags |= GUEST_MEMFD_FLAG_INIT_PRIVATE;
+
+	*guest_memfd = vm_create_guest_memfd(vm, test_page_size, flags);
+	TEST_ASSERT(*guest_memfd > 0, "guest_memfd creation failed");
+
+	*mem = add_memslot(vm, test_page_size, *guest_memfd);
+
+	virt_map(vm, GUEST_MEMFD_SHARING_TEST_GVA, GUEST_MEMFD_SHARING_TEST_GPA,
+		 test_nr_pages);
+
+	return vm;
+}
+
+static void cleanup_test(size_t guest_memfd_size, struct kvm_vm *vm,
+			 int guest_memfd, char *mem)
+{
+	kvm_vm_free(vm);
+	TEST_ASSERT_EQ(munmap(mem, guest_memfd_size), 0);
+
+	if (guest_memfd > -1)
+		TEST_ASSERT_EQ(close(guest_memfd), 0);
+}
+
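+/*
+ * Test the basic flow: memory starts shared and is usable by both host
+ * and guest, becomes host-inaccessible once converted to private, and is
+ * usable by both again after conversion back to shared.
+ */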
+static void test_sharing(void)
+{
+	struct kvm_vcpu *vcpu;
+	struct kvm_vm *vm;
+	int guest_memfd;
+	char *mem;
+
+	vm = setup_test(PAGE_SIZE, /*init_private=*/false, &vcpu, &guest_memfd, &mem);
+
+	host_use_memory(mem, 'X', 'A');
+	guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA, 'A', 'B', 0);
+
+	/* Convert the page to private and run the test again. */
+	guest_memfd_convert_private(guest_memfd, 0, PAGE_SIZE);
+
+	assert_host_cannot_fault(mem);
+	guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA, 'B', 'C', 0);
+
+	guest_memfd_convert_shared(guest_memfd, 0, PAGE_SIZE);
+
+	host_use_memory(mem, 'C', 'D');
+	guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA, 'D', 'E', 0);
+
+	cleanup_test(PAGE_SIZE, vm, guest_memfd, mem);
+}
+
+static void test_init_mappable_false(void)
+{
+	struct kvm_vcpu *vcpu;
+	struct kvm_vm *vm;
+	int guest_memfd;
+	char *mem;
+
+	vm = setup_test(PAGE_SIZE, /*init_private=*/true, &vcpu, &guest_memfd, &mem);
+
+	assert_host_cannot_fault(mem);
+	guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA, 'X', 'A', 0);
+
+	guest_memfd_convert_shared(guest_memfd, 0, PAGE_SIZE);
+
+	host_use_memory(mem, 'A', 'B');
+	guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA, 'B', 'C', 0);
+
+	cleanup_test(PAGE_SIZE, vm, guest_memfd, mem);
+}
+
+/*
+ * Test that even if there are no folios yet, conversion requests are recorded
+ * in guest_memfd.
+ */
+static void test_conversion_before_allocation(void)
+{
+	struct kvm_vcpu *vcpu;
+	struct kvm_vm *vm;
+	int guest_memfd;
+	char *mem;
+
+	vm = setup_test(PAGE_SIZE, /*init_private=*/false, &vcpu, &guest_memfd, &mem);
+
+	guest_memfd_convert_private(guest_memfd, 0, PAGE_SIZE);
+
+	assert_host_cannot_fault(mem);
+	guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA, 'X', 'A', 0);
+
+	guest_memfd_convert_shared(guest_memfd, 0, PAGE_SIZE);
+
+	host_use_memory(mem, 'A', 'B');
+	guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA, 'B', 'C', 0);
+
+	cleanup_test(PAGE_SIZE, vm, guest_memfd, mem);
+}
+
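+/*
+ * Test conversion when only some of the pages in the range have been
+ * faulted in (and hence have folios allocated) at conversion time.
+ */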
+static void __test_conversion_if_not_all_folios_allocated(int total_nr_pages,
+							  int page_to_fault)
+{
+	const int second_page_to_fault = 8;
+	struct kvm_vcpu *vcpu;
+	struct kvm_vm *vm;
+	size_t total_size;
+	int guest_memfd;
+	char *mem;
+	int i;
+
+	total_size = PAGE_SIZE * total_nr_pages;
+	vm = setup_test(total_size, /*init_private=*/false, &vcpu, &guest_memfd, &mem);
+
+	/*
+	 * Fault in two of the pages to test filemap range operations (only
+	 * one page is faulted when page_to_fault == second_page_to_fault).
+	 */
+	host_use_memory(mem + page_to_fault * PAGE_SIZE, 'X', 'A');
+	host_use_memory(mem + second_page_to_fault * PAGE_SIZE, 'X', 'A');
+
+	guest_memfd_convert_private(guest_memfd, 0, total_size);
+
+	for (i = 0; i < total_nr_pages; ++i) {
+		bool is_faulted;
+		char expected;
+
+		assert_host_cannot_fault(mem + i * PAGE_SIZE);
+
+		is_faulted = i == page_to_fault || i == second_page_to_fault;
+		expected = is_faulted ? 'A' : 'X';
+		guest_use_memory(vcpu,
+				 GUEST_MEMFD_SHARING_TEST_GVA + i * PAGE_SIZE,
+				 expected, 'B', 0);
+	}
+
+	guest_memfd_convert_shared(guest_memfd, 0, total_size);
+
+	for (i = 0; i < total_nr_pages; ++i) {
+		host_use_memory(mem + i * PAGE_SIZE, 'B', 'C');
+		guest_use_memory(vcpu,
+				 GUEST_MEMFD_SHARING_TEST_GVA + i * PAGE_SIZE,
+				 'C', 'D', 0);
+	}
+
+	cleanup_test(total_size, vm, guest_memfd, mem);
+}
+
+static void test_conversion_if_not_all_folios_allocated(void)
+{
+	const int total_nr_pages = 16;
+	int i;
+
+	for (i = 0; i < total_nr_pages; ++i)
+		__test_conversion_if_not_all_folios_allocated(total_nr_pages, i);
+}
+
+static void test_conversions_should_not_affect_surrounding_pages(void)
+{
+	struct kvm_vcpu *vcpu;
+	int page_to_convert;
+	struct kvm_vm *vm;
+	size_t total_size;
+	int guest_memfd;
+	int nr_pages;
+	char *mem;
+	int i;
+
+	page_to_convert = 2;
+	nr_pages = 4;
+	total_size = PAGE_SIZE * nr_pages;
+
+	vm = setup_test(total_size, /*init_private=*/false, &vcpu, &guest_memfd, &mem);
+
+	for (i = 0; i < nr_pages; ++i) {
+		host_use_memory(mem + i * PAGE_SIZE, 'X', 'A');
+		guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA + i * PAGE_SIZE,
+				 'A', 'B', 0);
+	}
+
+	guest_memfd_convert_private(guest_memfd, PAGE_SIZE * page_to_convert, PAGE_SIZE);
+
+	for (i = 0; i < nr_pages; ++i) {
+		char to_check;
+
+		if (i == page_to_convert) {
+			assert_host_cannot_fault(mem + i * PAGE_SIZE);
+			to_check = 'B';
+		} else {
+			host_use_memory(mem + i * PAGE_SIZE, 'B', 'C');
+			to_check = 'C';
+		}
+
+		guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA + i * PAGE_SIZE,
+				 to_check, 'D', 0);
+	}
+
+	guest_memfd_convert_shared(guest_memfd, PAGE_SIZE * page_to_convert, PAGE_SIZE);
+
+	for (i = 0; i < nr_pages; ++i) {
+		host_use_memory(mem + i * PAGE_SIZE, 'D', 'E');
+		guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA + i * PAGE_SIZE,
+				 'E', 'F', 0);
+	}
+
+	cleanup_test(total_size, vm, guest_memfd, mem);
+}
+
+static void __test_conversions_should_fail_if_memory_has_elevated_refcount(
+	int nr_pages, int page_to_convert)
+{
+	struct kvm_vcpu *vcpu;
+	loff_t error_offset;
+	struct kvm_vm *vm;
+	size_t total_size;
+	int guest_memfd;
+	char *mem;
+	int ret;
+	int i;
+
+	total_size = PAGE_SIZE * nr_pages;
+	vm = setup_test(total_size, /*init_private=*/false, &vcpu, &guest_memfd, &mem);
+
+	pin_pages(mem + page_to_convert * PAGE_SIZE, PAGE_SIZE);
+
+	for (i = 0; i < nr_pages; i++) {
+		host_use_memory(mem + i * PAGE_SIZE, 'X', 'A');
+		guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA + i * PAGE_SIZE,
+				 'A', 'B', 0);
+	}
+
+	error_offset = 0;
+	ret = __guest_memfd_convert_private(guest_memfd, page_to_convert * PAGE_SIZE,
+					    PAGE_SIZE, &error_offset);
+	TEST_ASSERT_EQ(ret, -1);
+	TEST_ASSERT_EQ(errno, EAGAIN);
+	TEST_ASSERT_EQ(error_offset, page_to_convert * PAGE_SIZE);
+
+	unpin_pages();
+
+	guest_memfd_convert_private(guest_memfd, page_to_convert * PAGE_SIZE, PAGE_SIZE);
+
+	for (i = 0; i < nr_pages; i++) {
+		char expected;
+
+		if (i == page_to_convert)
+			assert_host_cannot_fault(mem + i * PAGE_SIZE);
+		else
+			host_use_memory(mem + i * PAGE_SIZE, 'B', 'C');
+
+		expected = i == page_to_convert ? 'X' : 'C';
+		guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA + i * PAGE_SIZE,
+				 expected, 'D', 0);
+	}
+
+	guest_memfd_convert_shared(guest_memfd, page_to_convert * PAGE_SIZE, PAGE_SIZE);
+
+	for (i = 0; i < nr_pages; i++) {
+		char expected = i == page_to_convert ? 'X' : 'D';
+
+		host_use_memory(mem + i * PAGE_SIZE, expected, 'E');
+		guest_use_memory(vcpu,
+				 GUEST_MEMFD_SHARING_TEST_GVA + i * PAGE_SIZE,
+				 'E', 'F', 0);
+	}
+
+	cleanup_test(total_size, vm, guest_memfd, mem);
+}
+
+/*
+ * This test depends on CONFIG_GUP_TEST, which exposes pin_user_pages()
+ * to userspace via a debugfs interface.
+ */
+static void test_conversions_should_fail_if_memory_has_elevated_refcount(void)
+{
+	int i;
+
+	for (i = 0; i < 4; i++)
+		__test_conversions_should_fail_if_memory_has_elevated_refcount(4, i);
+}
+
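+/*
+ * Test that truncation (hole-punching) does not change mappability:
+ * shared memory remains host-faultable and private memory remains
+ * host-inaccessible after truncation.
+ */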
+static void test_truncate_should_not_change_mappability(void)
+{
+	struct kvm_vcpu *vcpu;
+	struct kvm_vm *vm;
+	int guest_memfd;
+	char *mem;
+	int ret;
+
+	vm = setup_test(PAGE_SIZE, /*init_private=*/false, &vcpu, &guest_memfd, &mem);
+
+	host_use_memory(mem, 'X', 'A');
+
+	ret = fallocate(guest_memfd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
+			0, PAGE_SIZE);
+	TEST_ASSERT(!ret, "truncating the first page should succeed");
+
+	host_use_memory(mem, 'X', 'A');
+
+	guest_memfd_convert_private(guest_memfd, 0, PAGE_SIZE);
+
+	assert_host_cannot_fault(mem);
+	guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA, 'A', 'A', 0);
+
+	ret = fallocate(guest_memfd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
+			0, PAGE_SIZE);
+	TEST_ASSERT(!ret, "truncating the first page should succeed");
+
+	assert_host_cannot_fault(mem);
+	guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA, 'X', 'A', 0);
+
+	cleanup_test(PAGE_SIZE, vm, guest_memfd, mem);
+}
+
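+/*
+ * Test that whether a fault is handled as private or shared follows
+ * guest_memfd's tracking, independent of KVM's memory attributes
+ * (kvm->mem_attr_array).
+ */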
+static void test_fault_type_independent_of_mem_attributes(void)
+{
+	struct kvm_vcpu *vcpu;
+	struct kvm_vm *vm;
+	int guest_memfd;
+	char *mem;
+
+	vm = setup_test(PAGE_SIZE, /*init_private=*/true, &vcpu, &guest_memfd, &mem);
+	vm_mem_set_shared(vm, GUEST_MEMFD_SHARING_TEST_GPA, PAGE_SIZE);
+
+	/*
+	 * kvm->mem_attr_array set to shared, guest_memfd memory initialized as
+	 * private.
+	 */
+
+	/* Host cannot use private memory. */
+	assert_host_cannot_fault(mem);
+
+	/* Guest can fault and use memory. */
+	guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA, 'X', 'A', 0);
+
+	guest_memfd_convert_shared(guest_memfd, 0, PAGE_SIZE);
+	vm_mem_set_private(vm, GUEST_MEMFD_SHARING_TEST_GPA, PAGE_SIZE);
+
+	/* Host can use shared memory. */
+	host_use_memory(mem, 'X', 'A');
+
+	/* Guest can also use shared memory. */
+	guest_use_memory(vcpu, GUEST_MEMFD_SHARING_TEST_GVA, 'X', 'A', 0);
+
+	cleanup_test(PAGE_SIZE, vm, guest_memfd, mem);
+}
+
+int main(int argc, char *argv[])
+{
+	TEST_REQUIRE(kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(KVM_X86_SW_PROTECTED_VM));
+	TEST_REQUIRE(kvm_check_cap(KVM_CAP_GMEM_SHARED_MEM));
+	TEST_REQUIRE(kvm_check_cap(KVM_CAP_GMEM_CONVERSION));
+
+	test_sharing();
+	test_init_mappable_false();
+	test_conversion_before_allocation();
+	test_conversion_if_not_all_folios_allocated();
+	test_conversions_should_not_affect_surrounding_pages();
+	test_truncate_should_not_change_mappability();
+	test_conversions_should_fail_if_memory_has_elevated_refcount();
+	test_fault_type_independent_of_mem_attributes();
+
+	return 0;
+}
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index 853ab68cff79..ffe0625f2d71 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -18,11 +18,13 @@
 #include <asm/atomic.h>
 #include <asm/kvm.h>
 
+#include <string.h>
+#include <sys/types.h>
 #include <sys/ioctl.h>
 
 #include "kvm_util_arch.h"
 #include "kvm_util_types.h"
 #include "sparsebit.h"
 
 #define KVM_DEV_PATH "/dev/kvm"
 #define KVM_MAX_VCPUS 512
@@ -426,6 +428,78 @@ static inline void vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa,
 	vm_set_memory_attributes(vm, gpa, size, 0);
 }
 
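+/*
+ * Wrappers around the KVM_GMEM_CONVERT_PRIVATE/SHARED guest_memfd ioctls.
+ * On failure, @error_offset reports the offset at which conversion failed.
+ */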
+static inline int __guest_memfd_convert_private(int guest_memfd, loff_t offset,
+						size_t size, loff_t *error_offset)
+{
+	int ret;
+
+	struct kvm_gmem_convert param = {
+		.offset = offset,
+		.size = size,
+		.error_offset = 0,
+	};
+
+	ret = ioctl(guest_memfd, KVM_GMEM_CONVERT_PRIVATE, &param);
+	if (ret)
+		*error_offset = param.error_offset;
+
+	return ret;
+}
+
+static inline void guest_memfd_convert_private(int guest_memfd, loff_t offset,
+					       size_t size)
+{
+	loff_t error_offset;
+	int retries;
+	int ret;
+
+	retries = 2;
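+	/*
+	 * Retry once on EAGAIN: conversions can fail transiently if pages in
+	 * the range hold short-lived extra refcounts (e.g. from speculative
+	 * lookups).
+	 */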
+	do {
+		error_offset = 0;
+		ret = __guest_memfd_convert_private(guest_memfd, offset, size,
+						    &error_offset);
+	} while (ret == -1 && errno == EAGAIN && --retries > 0);
+
+	TEST_ASSERT(!ret, "Unexpected error %s (%m) at offset 0x%lx",
+		    strerrorname_np(errno), error_offset);
+}
+
+static inline int __guest_memfd_convert_shared(int guest_memfd, loff_t offset,
+					       size_t size, loff_t *error_offset)
+{
+	int ret;
+
+	struct kvm_gmem_convert param = {
+		.offset = offset,
+		.size = size,
+		.error_offset = 0,
+	};
+
+	ret = ioctl(guest_memfd, KVM_GMEM_CONVERT_SHARED, &param);
+	if (ret)
+		*error_offset = param.error_offset;
+
+	return ret;
+}
+
+static inline void guest_memfd_convert_shared(int guest_memfd, loff_t offset,
+					      size_t size)
+{
+	loff_t error_offset;
+	int retries;
+	int ret;
+
+	retries = 2;
+	do {
+		error_offset = 0;
+		ret = __guest_memfd_convert_shared(guest_memfd, offset, size,
+						    &error_offset);
+	} while (ret == -1 && errno == EAGAIN && --retries > 0);
+
+	TEST_ASSERT(!ret, "Unexpected error %s (%m) at offset 0x%lx",
+		    strerrorname_np(errno), error_offset);
+}
+
 void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size,
 			    bool punch_hole);
 
-- 
2.49.0.1045.g170613ef41-goog

