From: Anish Moorthy <amoorthy@google.com>
To: seanjc@google.com, oliver.upton@linux.dev, kvm@vger.kernel.org,
kvmarm@lists.linux.dev
Cc: pbonzini@redhat.com, maz@kernel.org, robert.hoo.linux@gmail.com,
jthoughton@google.com, amoorthy@google.com, bgardon@google.com,
dmatlack@google.com, ricarkol@google.com,
axelrasmussen@google.com, peterx@redhat.com,
nadav.amit@gmail.com, isaku.yamahata@gmail.com
Subject: [PATCH v4 16/16] KVM: selftests: Handle memory fault exits in demand_paging_test
Date: Fri, 2 Jun 2023 16:19:21 +0000 [thread overview]
Message-ID: <20230602161921.208564-17-amoorthy@google.com> (raw)
In-Reply-To: <20230602161921.208564-1-amoorthy@google.com>
Demonstrate a (very basic) scheme for supporting memory fault exits.
From the vCPU threads:
1. Simply issue UFFDIO_COPY/CONTINUEs in response to memory fault exits,
with the purpose of establishing the absent mappings. Do so with
wake_waiters=false to avoid serializing on the userfaultfd wait queue
locks.
2. When the UFFDIO_COPY/CONTINUE in (1) fails with EEXIST,
assume that the mapping was already established but is currently
absent [A] and attempt to populate it using MADV_POPULATE_WRITE.
Issue UFFDIO_COPY/CONTINUEs from the reader threads as well, but with
wake_waiters=true to ensure that any threads sleeping on the uffd are
eventually woken up.
A real VMM would track whether it had already COPY/CONTINUEd pages (eg,
via a bitmap) to avoid calls destined to EEXIST. However, even the
naive approach is enough to demonstrate the performance advantages of
KVM_EXIT_MEMORY_FAULT.
[A] In reality it is much likelier that the vCPU thread simply lost a
race to establish the mapping for the page.
Signed-off-by: Anish Moorthy <amoorthy@google.com>
Acked-by: James Houghton <jthoughton@google.com>
---
.../selftests/kvm/demand_paging_test.c | 235 +++++++++++++-----
1 file changed, 166 insertions(+), 69 deletions(-)
diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
index ffbc89300c46..4b79c88cb22d 100644
--- a/tools/testing/selftests/kvm/demand_paging_test.c
+++ b/tools/testing/selftests/kvm/demand_paging_test.c
@@ -15,6 +15,7 @@
#include <time.h>
#include <pthread.h>
#include <linux/userfaultfd.h>
+#include <linux/mman.h>
#include <sys/syscall.h>
#include "kvm_util.h"
@@ -31,36 +32,99 @@ static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
static size_t demand_paging_size;
static char *guest_data_prototype;
+static int num_uffds;
+static size_t uffd_region_size;
+static struct uffd_desc **uffd_descs;
+/*
+ * Delay when demand paging is performed through userfaultfd or directly by
+ * vcpu_worker in the case of a KVM_EXIT_MEMORY_FAULT.
+ */
+static useconds_t uffd_delay;
+static int uffd_mode;
+
+
+static int handle_uffd_page_request(int uffd_mode, int uffd, uint64_t hva,
+ bool is_vcpu);
+
+static void madv_write_or_err(uint64_t gpa)
+{
+ int r;
+ void *hva = addr_gpa2hva(memstress_args.vm, gpa);
+
+ r = madvise(hva, demand_paging_size, MADV_POPULATE_WRITE);
+ TEST_ASSERT(r == 0,
+ "MADV_POPULATE_WRITE on hva 0x%lx (gpa 0x%lx) fail, errno %i\n",
+ (uintptr_t) hva, gpa, errno);
+}
+
+static void ready_page(uint64_t gpa)
+{
+ int r, uffd;
+
+ /*
+ * This test only registers memslot 1 w/ userfaultfd. Any accesses outside
+ * the registered ranges should fault in the physical pages through
+ * MADV_POPULATE_WRITE.
+ */
+ if ((gpa < memstress_args.gpa)
+ || (gpa >= memstress_args.gpa + memstress_args.size)) {
+ madv_write_or_err(gpa);
+ } else {
+ if (uffd_delay)
+ usleep(uffd_delay);
+
+ uffd = uffd_descs[(gpa - memstress_args.gpa) / uffd_region_size]->uffd;
+
+ r = handle_uffd_page_request(uffd_mode, uffd,
+ (uint64_t) addr_gpa2hva(memstress_args.vm, gpa), true);
+
+ if (r == EEXIST)
+ madv_write_or_err(gpa);
+ }
+}
+
static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
{
struct kvm_vcpu *vcpu = vcpu_args->vcpu;
int vcpu_idx = vcpu_args->vcpu_idx;
struct kvm_run *run = vcpu->run;
- struct timespec start;
- struct timespec ts_diff;
+ struct timespec last_start;
+ struct timespec total_runtime = {};
int ret;
- clock_gettime(CLOCK_MONOTONIC, &start);
- /* Let the guest access its memory */
- ret = _vcpu_run(vcpu);
- TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
- if (get_ucall(vcpu, NULL) != UCALL_SYNC) {
- TEST_ASSERT(false,
- "Invalid guest sync status: exit_reason=%s\n",
- exit_reason_str(run->exit_reason));
- }
+ while (true) {
+ clock_gettime(CLOCK_MONOTONIC, &last_start);
+ /* Let the guest access its memory */
+ ret = _vcpu_run(vcpu);
+ TEST_ASSERT(ret == 0
+ || (errno == EFAULT
+ && run->exit_reason == KVM_EXIT_MEMORY_FAULT),
+ "vcpu_run failed: %d\n", ret);
- ts_diff = timespec_elapsed(start);
+ total_runtime = timespec_add(total_runtime,
+ timespec_elapsed(last_start));
+ if (ret != 0 && get_ucall(vcpu, NULL) != UCALL_SYNC) {
+
+ if (run->exit_reason == KVM_EXIT_MEMORY_FAULT) {
+ ready_page(run->memory_fault.gpa);
+ continue;
+ }
+
+ TEST_ASSERT(false,
+ "Invalid guest sync status: exit_reason=%s\n",
+ exit_reason_str(run->exit_reason));
+ }
+ break;
+ }
PER_VCPU_DEBUG("vCPU %d execution time: %ld.%.9lds\n", vcpu_idx,
- ts_diff.tv_sec, ts_diff.tv_nsec);
+ total_runtime.tv_sec, total_runtime.tv_nsec);
}
-static int handle_uffd_page_request(int uffd_mode, int uffd,
- struct uffd_msg *msg)
+static int handle_uffd_page_request(int uffd_mode, int uffd, uint64_t hva,
+ bool is_vcpu)
{
pid_t tid = syscall(__NR_gettid);
- uint64_t addr = msg->arg.pagefault.address;
struct timespec start;
struct timespec ts_diff;
int r;
@@ -71,16 +135,15 @@ static int handle_uffd_page_request(int uffd_mode, int uffd,
struct uffdio_copy copy;
copy.src = (uint64_t)guest_data_prototype;
- copy.dst = addr;
+ copy.dst = hva;
copy.len = demand_paging_size;
- copy.mode = 0;
+ copy.mode = is_vcpu ? UFFDIO_COPY_MODE_DONTWAKE : 0;
- r = ioctl(uffd, UFFDIO_COPY, &copy);
/*
- * With multiple vCPU threads fault on a single page and there are
- * multiple readers for the UFFD, at least one of the UFFDIO_COPYs
- * will fail with EEXIST: handle that case without signaling an
- * error.
+ * With multiple vCPU threads and at least one of multiple reader threads
+ * or vCPU memory faults, multiple vCPUs accessing an absent page will
+ * almost certainly cause some thread doing the UFFDIO_COPY here to get
+ * EEXIST: make sure to allow that case.
*
* Note that this also suppress any EEXISTs occurring from,
* e.g., the first UFFDIO_COPY/CONTINUEs on a page. That never
@@ -88,23 +151,24 @@ static int handle_uffd_page_request(int uffd_mode, int uffd,
* some external state to correctly surface EEXISTs to userspace
* (or prevent duplicate COPY/CONTINUEs in the first place).
*/
- if (r == -1 && errno != EEXIST) {
- pr_info("Failed UFFDIO_COPY in 0x%lx from thread %d, errno = %d\n",
- addr, tid, errno);
- return r;
- }
+ r = ioctl(uffd, UFFDIO_COPY, &copy);
+ TEST_ASSERT(r == 0 || errno == EEXIST,
+ "Thread 0x%x failed UFFDIO_COPY on hva 0x%lx, errno = %d",
+ tid, hva, errno);
} else if (uffd_mode == UFFDIO_REGISTER_MODE_MINOR) {
+ /* The comments in the UFFDIO_COPY branch also apply here. */
struct uffdio_continue cont = {0};
- cont.range.start = addr;
+ cont.range.start = hva;
cont.range.len = demand_paging_size;
+ cont.mode = is_vcpu ? UFFDIO_CONTINUE_MODE_DONTWAKE : 0;
r = ioctl(uffd, UFFDIO_CONTINUE, &cont);
/*
- * With multiple vCPU threads fault on a single page and there are
- * multiple readers for the UFFD, at least one of the UFFDIO_COPYs
- * will fail with EEXIST: handle that case without signaling an
- * error.
+ * With multiple vCPU threads and at least one of multiple reader threads
+ * or vCPU memory faults, multiple vCPUs accessing an absent page will
+ * almost certainly cause some thread doing the UFFDIO_COPY here to get
+ * EEXIST: make sure to allow that case.
*
* Note that this also suppress any EEXISTs occurring from,
* e.g., the first UFFDIO_COPY/CONTINUEs on a page. That never
@@ -112,32 +176,54 @@ static int handle_uffd_page_request(int uffd_mode, int uffd,
* some external state to correctly surface EEXISTs to userspace
* (or prevent duplicate COPY/CONTINUEs in the first place).
*/
- if (r == -1 && errno != EEXIST) {
- pr_info("Failed UFFDIO_CONTINUE in 0x%lx, thread %d, errno = %d\n",
- addr, tid, errno);
- return r;
- }
+ TEST_ASSERT(r == 0 || errno == EEXIST,
+ "Thread 0x%x failed UFFDIO_CONTINUE on hva 0x%lx, errno = %d",
+ tid, hva, errno);
} else {
TEST_FAIL("Invalid uffd mode %d", uffd_mode);
}
+ /*
+ * If the above UFFDIO_COPY/CONTINUE failed with EEXIST, waiting threads
+ * will not have been woken: wake them here.
+ */
+ if (!is_vcpu && r != 0) {
+ struct uffdio_range range = {
+ .start = hva,
+ .len = demand_paging_size
+ };
+ r = ioctl(uffd, UFFDIO_WAKE, &range);
+ TEST_ASSERT(r == 0,
+ "Thread 0x%x failed UFFDIO_WAKE on hva 0x%lx, errno = %d",
+ tid, hva, errno);
+ }
+
ts_diff = timespec_elapsed(start);
PER_PAGE_DEBUG("UFFD page-in %d \t%ld ns\n", tid,
timespec_to_ns(ts_diff));
PER_PAGE_DEBUG("Paged in %ld bytes at 0x%lx from thread %d\n",
- demand_paging_size, addr, tid);
+ demand_paging_size, hva, tid);
return 0;
}
+static int handle_uffd_page_request_from_uffd(int uffd_mode, int uffd,
+ struct uffd_msg *msg)
+{
+ TEST_ASSERT(msg->event == UFFD_EVENT_PAGEFAULT,
+ "Received uffd message with event %d != UFFD_EVENT_PAGEFAULT",
+ msg->event);
+ return handle_uffd_page_request(uffd_mode, uffd,
+ msg->arg.pagefault.address, false);
+}
+
struct test_params {
- int uffd_mode;
bool single_uffd;
- useconds_t uffd_delay;
int readers_per_uffd;
enum vm_mem_backing_src_type src_type;
bool partition_vcpu_memory_access;
+ bool memfault_exits;
};
static void prefault_mem(void *alias, uint64_t len)
@@ -155,16 +241,22 @@ static void run_test(enum vm_guest_mode mode, void *arg)
{
struct memstress_vcpu_args *vcpu_args;
struct test_params *p = arg;
- struct uffd_desc **uffd_descs = NULL;
struct timespec start;
struct timespec ts_diff;
struct kvm_vm *vm;
- int i, num_uffds = 0;
+ int i;
double vcpu_paging_rate;
- uint64_t uffd_region_size;
+ uint32_t slot_flags = 0;
+ bool uffd_memfault_exits = uffd_mode && p->memfault_exits;
- vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1, 0,
- p->src_type, p->partition_vcpu_memory_access);
+ if (uffd_memfault_exits) {
+ TEST_ASSERT(kvm_has_cap(KVM_CAP_NOWAIT_ON_FAULT) > 0,
+ "KVM does not have KVM_CAP_NOWAIT_ON_FAULT");
+ slot_flags = KVM_MEM_NOWAIT_ON_FAULT;
+ }
+
+ vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
+ 1, slot_flags, p->src_type, p->partition_vcpu_memory_access);
demand_paging_size = get_backing_src_pagesz(p->src_type);
@@ -173,21 +265,21 @@ static void run_test(enum vm_guest_mode mode, void *arg)
"Failed to allocate buffer for guest data pattern");
memset(guest_data_prototype, 0xAB, demand_paging_size);
- if (p->uffd_mode == UFFDIO_REGISTER_MODE_MINOR) {
- num_uffds = p->single_uffd ? 1 : nr_vcpus;
- for (i = 0; i < num_uffds; i++) {
- vcpu_args = &memstress_args.vcpu_args[i];
- prefault_mem(addr_gpa2alias(vm, vcpu_args->gpa),
- vcpu_args->pages * memstress_args.guest_page_size);
- }
- }
-
- if (p->uffd_mode) {
+ if (uffd_mode) {
num_uffds = p->single_uffd ? 1 : nr_vcpus;
uffd_region_size = nr_vcpus * guest_percpu_mem_size / num_uffds;
+ if (uffd_mode == UFFDIO_REGISTER_MODE_MINOR) {
+ for (i = 0; i < num_uffds; i++) {
+ vcpu_args = &memstress_args.vcpu_args[i];
+ prefault_mem(addr_gpa2alias(vm, vcpu_args->gpa),
+ uffd_region_size);
+ }
+ }
+
uffd_descs = malloc(num_uffds * sizeof(struct uffd_desc *));
- TEST_ASSERT(uffd_descs, "Memory allocation failed");
+ TEST_ASSERT(uffd_descs, "Failed to allocate uffd descriptors");
+
for (i = 0; i < num_uffds; i++) {
struct memstress_vcpu_args *vcpu_args;
void *vcpu_hva;
@@ -201,10 +293,10 @@ static void run_test(enum vm_guest_mode mode, void *arg)
* requests.
*/
uffd_descs[i] = uffd_setup_demand_paging(
- p->uffd_mode, p->uffd_delay, vcpu_hva,
+ uffd_mode, uffd_delay, vcpu_hva,
uffd_region_size,
p->readers_per_uffd,
- &handle_uffd_page_request);
+ &handle_uffd_page_request_from_uffd);
}
}
@@ -218,7 +310,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
ts_diff = timespec_elapsed(start);
pr_info("All vCPU threads joined\n");
- if (p->uffd_mode) {
+ if (uffd_mode) {
/* Tell the user fault fd handler threads to quit */
for (i = 0; i < num_uffds; i++)
uffd_stop_demand_paging(uffd_descs[i]);
@@ -239,7 +331,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
memstress_destroy_vm(vm);
free(guest_data_prototype);
- if (p->uffd_mode)
+ if (uffd_mode)
free(uffd_descs);
}
@@ -248,7 +340,7 @@ static void help(char *name)
puts("");
printf("usage: %s [-h] [-m vm_mode] [-u uffd_mode] [-a]\n"
" [-d uffd_delay_usec] [-r readers_per_uffd] [-b memory]\n"
- " [-s type] [-v vcpus] [-o]\n", name);
+ " [-w] [-s type] [-v vcpus] [-o]\n", name);
guest_modes_help();
printf(" -u: use userfaultfd to handle vCPU page faults. Mode is a\n"
" UFFD registration mode: 'MISSING' or 'MINOR'.\n");
@@ -259,6 +351,7 @@ static void help(char *name)
" FD handler to simulate demand paging\n"
" overheads. Ignored without -u.\n");
printf(" -r: Set the number of reader threads per uffd.\n");
+ printf(" -w: Enable kvm cap for memory fault exits.\n");
printf(" -b: specify the size of the memory region which should be\n"
" demand paged by each vCPU. e.g. 10M or 3G.\n"
" Default: 1G\n");
@@ -278,29 +371,30 @@ int main(int argc, char *argv[])
.partition_vcpu_memory_access = true,
.readers_per_uffd = 1,
.single_uffd = false,
+ .memfault_exits = false,
};
int opt;
guest_modes_append_default();
- while ((opt = getopt(argc, argv, "ahom:u:d:b:s:v:r:")) != -1) {
+ while ((opt = getopt(argc, argv, "ahowm:u:d:b:s:v:r:")) != -1) {
switch (opt) {
case 'm':
guest_modes_cmdline(optarg);
break;
case 'u':
if (!strcmp("MISSING", optarg))
- p.uffd_mode = UFFDIO_REGISTER_MODE_MISSING;
+ uffd_mode = UFFDIO_REGISTER_MODE_MISSING;
else if (!strcmp("MINOR", optarg))
- p.uffd_mode = UFFDIO_REGISTER_MODE_MINOR;
- TEST_ASSERT(p.uffd_mode, "UFFD mode must be 'MISSING' or 'MINOR'.");
+ uffd_mode = UFFDIO_REGISTER_MODE_MINOR;
+ TEST_ASSERT(uffd_mode, "UFFD mode must be 'MISSING' or 'MINOR'.");
break;
case 'a':
p.single_uffd = true;
break;
case 'd':
- p.uffd_delay = strtoul(optarg, NULL, 0);
- TEST_ASSERT(p.uffd_delay >= 0, "A negative UFFD delay is not supported.");
+ uffd_delay = strtoul(optarg, NULL, 0);
+ TEST_ASSERT(uffd_delay >= 0, "A negative UFFD delay is not supported.");
break;
case 'b':
guest_percpu_mem_size = parse_size(optarg);
@@ -323,6 +417,9 @@ int main(int argc, char *argv[])
"Invalid number of readers per uffd %d: must be >=1",
p.readers_per_uffd);
break;
+ case 'w':
+ p.memfault_exits = true;
+ break;
case 'h':
default:
help(argv[0]);
@@ -330,7 +427,7 @@ int main(int argc, char *argv[])
}
}
- if (p.uffd_mode == UFFDIO_REGISTER_MODE_MINOR &&
+ if (uffd_mode == UFFDIO_REGISTER_MODE_MINOR &&
!backing_src_is_shared(p.src_type)) {
TEST_FAIL("userfaultfd MINOR mode requires shared memory; pick a different -s");
}
--
2.41.0.rc0.172.g3f132b7071-goog
next prev parent reply other threads:[~2023-06-02 16:20 UTC|newest]
Thread overview: 79+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-06-02 16:19 [PATCH v4 00/16] Improve scalability of KVM + userfaultfd live migration via annotated memory faults Anish Moorthy
2023-06-02 16:19 ` [PATCH v4 01/16] KVM: Allow hva_pfn_fast() to resolve read-only faults Anish Moorthy
2023-06-14 14:39 ` Sean Christopherson
2023-06-14 16:57 ` Anish Moorthy
2023-08-10 19:54 ` Anish Moorthy
2023-08-10 23:48 ` Sean Christopherson
2023-06-02 16:19 ` [PATCH v4 02/16] KVM: x86: Set vCPU exit reason to KVM_EXIT_UNKNOWN at the start of KVM_RUN Anish Moorthy
2023-06-02 20:30 ` Isaku Yamahata
2023-06-05 16:41 ` Anish Moorthy
2023-06-02 16:19 ` [PATCH v4 03/16] KVM: Add KVM_CAP_MEMORY_FAULT_INFO Anish Moorthy
2023-06-03 16:58 ` Isaku Yamahata
2023-06-05 16:37 ` Anish Moorthy
2023-06-14 14:55 ` Sean Christopherson
2023-06-05 17:46 ` Anish Moorthy
2023-06-14 17:35 ` Sean Christopherson
2023-06-20 21:13 ` Anish Moorthy
2023-07-07 11:50 ` Kautuk Consul
2023-07-10 15:00 ` Anish Moorthy
2023-07-11 3:54 ` Kautuk Consul
2023-07-11 14:25 ` Sean Christopherson
2023-08-11 22:12 ` Anish Moorthy
2023-08-14 18:01 ` Sean Christopherson
2023-08-15 0:06 ` Anish Moorthy
2023-08-15 0:43 ` Sean Christopherson
2023-08-15 17:01 ` Anish Moorthy
2023-08-16 15:58 ` Sean Christopherson
2023-08-16 21:28 ` Anish Moorthy
2023-08-17 23:58 ` Sean Christopherson
2023-08-18 17:32 ` Anish Moorthy
2023-08-23 22:20 ` Sean Christopherson
2023-08-23 23:38 ` Anish Moorthy
2023-08-24 17:24 ` Sean Christopherson
2023-08-17 22:55 ` Anish Moorthy
2023-07-05 8:21 ` Kautuk Consul
2023-06-02 16:19 ` [PATCH v4 04/16] KVM: Add docstrings to __kvm_write_guest_page() and __kvm_read_guest_page() Anish Moorthy
2023-06-15 2:41 ` Robert Hoo
2023-08-14 22:51 ` Anish Moorthy
2023-06-02 16:19 ` [PATCH v4 05/16] KVM: Annotate -EFAULTs from kvm_vcpu_write_guest_page() Anish Moorthy
2023-06-14 19:10 ` Sean Christopherson
2023-07-06 22:51 ` Anish Moorthy
2023-07-12 14:08 ` Sean Christopherson
2023-06-02 16:19 ` [PATCH v4 06/16] KVM: Annotate -EFAULTs from kvm_vcpu_read_guest_page() Anish Moorthy
2023-06-14 19:22 ` Sean Christopherson
2023-07-07 17:35 ` Anish Moorthy
2023-06-02 16:19 ` [PATCH v4 07/16] KVM: Simplify error handling in __gfn_to_pfn_memslot() Anish Moorthy
2023-06-14 19:26 ` Sean Christopherson
2023-07-07 17:33 ` Anish Moorthy
2023-07-10 17:40 ` Sean Christopherson
2023-06-02 16:19 ` [PATCH v4 08/16] KVM: x86: Annotate -EFAULTs from kvm_handle_error_pfn() Anish Moorthy
2023-06-14 20:03 ` Sean Christopherson
2023-07-07 18:05 ` Anish Moorthy
2023-06-15 2:43 ` Robert Hoo
2023-06-15 14:40 ` Sean Christopherson
2023-06-02 16:19 ` [PATCH v4 09/16] KVM: Introduce KVM_CAP_NOWAIT_ON_FAULT without implementation Anish Moorthy
2023-06-14 20:11 ` Sean Christopherson
2023-07-06 19:04 ` Anish Moorthy
2023-06-14 21:20 ` Sean Christopherson
2023-06-14 21:23 ` Sean Christopherson
2023-08-23 21:17 ` Anish Moorthy
2023-06-15 3:55 ` Wang, Wei W
2023-06-15 14:56 ` Sean Christopherson
2023-06-16 12:08 ` Wang, Wei W
2023-07-07 18:13 ` Anish Moorthy
2023-07-07 20:07 ` Anish Moorthy
2023-07-11 15:29 ` Sean Christopherson
2023-08-25 0:15 ` Anish Moorthy
2023-08-29 22:41 ` Sean Christopherson
2023-08-30 16:21 ` Anish Moorthy
2023-09-07 21:17 ` Sean Christopherson
2023-06-02 16:19 ` [PATCH v4 10/16] KVM: x86: Implement KVM_CAP_NOWAIT_ON_FAULT Anish Moorthy
2023-06-14 20:25 ` Sean Christopherson
2023-07-07 17:41 ` Anish Moorthy
2023-06-02 16:19 ` [PATCH v4 11/16] KVM: arm64: " Anish Moorthy
2023-06-02 16:19 ` [PATCH v4 12/16] KVM: selftests: Report per-vcpu demand paging rate from demand paging test Anish Moorthy
2023-06-02 16:19 ` [PATCH v4 13/16] KVM: selftests: Allow many vCPUs and reader threads per UFFD in " Anish Moorthy
2023-06-02 16:19 ` [PATCH v4 14/16] KVM: selftests: Use EPOLL in userfaultfd_util reader threads and signal errors via TEST_ASSERT Anish Moorthy
2023-06-02 16:19 ` [PATCH v4 15/16] KVM: selftests: Add memslot_flags parameter to memstress_create_vm() Anish Moorthy
2023-06-02 16:19 ` Anish Moorthy [this message]
2023-06-20 2:44 ` [PATCH v4 16/16] KVM: selftests: Handle memory fault exits in demand_paging_test Robert Hoo
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230602161921.208564-17-amoorthy@google.com \
--to=amoorthy@google.com \
--cc=axelrasmussen@google.com \
--cc=bgardon@google.com \
--cc=dmatlack@google.com \
--cc=isaku.yamahata@gmail.com \
--cc=jthoughton@google.com \
--cc=kvm@vger.kernel.org \
--cc=kvmarm@lists.linux.dev \
--cc=maz@kernel.org \
--cc=nadav.amit@gmail.com \
--cc=oliver.upton@linux.dev \
--cc=pbonzini@redhat.com \
--cc=peterx@redhat.com \
--cc=ricarkol@google.com \
--cc=robert.hoo.linux@gmail.com \
--cc=seanjc@google.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.