* [PATCH 3/3 V6] selftest: KVM: Add intra host migration
@ 2021-08-30 21:29 Peter Gonda
0 siblings, 0 replies; 8+ messages in thread
From: Peter Gonda @ 2021-08-30 21:29 UTC (permalink / raw)
To: kvm; +Cc: Peter Gonda, Sean Christopherson, Marc Orr, Brijesh Singh,
linux-kernel
Add test cases for intra-host migration for SEV and SEV-ES. Also add a
locking test to confirm no deadlock exists.
---
tools/testing/selftests/kvm/Makefile | 1 +
.../selftests/kvm/x86_64/sev_vm_tests.c | 152 ++++++++++++++++++
2 files changed, 153 insertions(+)
create mode 100644 tools/testing/selftests/kvm/x86_64/sev_vm_tests.c
Signed-off-by: Peter Gonda <pgonda@google.com>
Suggested-by: Sean Christopherson <seanjc@google.com>
Cc: Marc Orr <marcorr@google.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: kvm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 5832f510a16c..de6e64d5c9c4 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -71,6 +71,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
+TEST_GEN_PROGS_x86_64 += x86_64/sev_vm_tests
TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
TEST_GEN_PROGS_x86_64 += demand_paging_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
diff --git a/tools/testing/selftests/kvm/x86_64/sev_vm_tests.c b/tools/testing/selftests/kvm/x86_64/sev_vm_tests.c
new file mode 100644
index 000000000000..50a770316628
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/sev_vm_tests.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kvm.h>
+#include <linux/psp-sev.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <pthread.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "svm_util.h"
+#include "kvm_util.h"
+#include "kselftest.h"
+#include "../lib/kvm_util_internal.h"
+
+#define SEV_DEV_PATH "/dev/sev"
+
+/*
+ * Open SEV_DEV_PATH if available, otherwise exit the entire program.
+ *
+ * Input Args:
+ *   flags - The flags to pass when opening SEV_DEV_PATH.
+ *
+ * Return:
+ *   The opened file descriptor of /dev/sev.
+ *
+ * The fd is cached in a static; only the first caller's flags are used.
+ */
+static int open_sev_dev_path_or_exit(int flags)
+{
+	static int fd;
+
+	if (!fd) {
+		fd = open(SEV_DEV_PATH, flags);
+		if (fd < 0) {
+			print_skip("%s not available, is SEV not enabled? (errno: %d)",
+				   SEV_DEV_PATH, errno);
+			exit(KSFT_SKIP);
+		}
+	}
+
+	return fd;
+}
+
+/*
+ * Issue a KVM SEV command (KVM_MEMORY_ENCRYPT_OP) against @vm_fd and assert
+ * that both the ioctl and the SEV firmware reported success.
+ */
+static void sev_ioctl(int vm_fd, int cmd_id, void *data)
+{
+	struct kvm_sev_cmd cmd = { 0 };
+	int ret;
+
+	TEST_ASSERT(cmd_id < KVM_SEV_NR_MAX, "Unknown SEV CMD : %d\n", cmd_id);
+
+	cmd.id = cmd_id;
+	cmd.sev_fd = open_sev_dev_path_or_exit(0);
+	cmd.data = (uint64_t)data;
+	ret = ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
+	/*
+	 * cmd.error is zero-initialized and SEV_RET_SUCCESS is 0, so the old
+	 * "ret == 0 || cmd.error == SEV_RET_SUCCESS" check silently passed
+	 * when the ioctl failed before ever reaching the PSP.  Require both
+	 * the ioctl and the firmware status to succeed.
+	 */
+	TEST_ASSERT(ret == 0 && cmd.error == SEV_RET_SUCCESS,
+		    "%d failed: return code: %d, errno: %d, fw error: %d",
+		    cmd_id, ret, errno, cmd.error);
+}
+
+/*
+ * Create a SEV (or, if @es, SEV-ES) VM with a few vCPUs and advance it
+ * through LAUNCH_START (and LAUNCH_UPDATE_VMSA for -ES) so that it owns an
+ * encryption context that can be migrated.
+ */
+static struct kvm_vm *sev_vm_create(bool es)
+{
+	/* Number of vCPUs added to each test VM. */
+	const int nr_vcpus = 3;
+	struct kvm_sev_launch_start start = { 0 };
+	struct kvm_vm *vm;
+	int i;
+
+	vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
+	sev_ioctl(vm->fd, es ? KVM_SEV_ES_INIT : KVM_SEV_INIT, NULL);
+	for (i = 0; i < nr_vcpus; ++i)
+		vm_vcpu_add(vm, i);
+
+	/* Guest policy bit 2 requests SEV-ES (encrypted register state). */
+	start.policy |= (es) << 2;
+	sev_ioctl(vm->fd, KVM_SEV_LAUNCH_START, &start);
+	if (es)
+		sev_ioctl(vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
+	return vm;
+}
+
+/*
+ * Create a chain of SEV (or SEV-ES) VMs and migrate the encryption context
+ * down the chain: vms[0] -> vms[1] -> vms[2], exercising
+ * KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM at each hop.
+ */
+static void test_sev_migrate_from(bool es)
+{
+	struct kvm_enable_cap cap = { 0 };
+	struct kvm_vm *vms[3];
+	int i;
+
+	for (i = 0; i < sizeof(vms) / sizeof(vms[0]); ++i)
+		vms[i] = sev_vm_create(es);
+
+	cap.cap = KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM;
+	for (i = 0; i < sizeof(vms) / sizeof(vms[0]) - 1; ++i) {
+		/* Migrate from vms[i] into vms[i + 1]. */
+		cap.args[0] = vms[i]->fd;
+		vm_enable_cap(vms[i + 1], &cap);
+	}
+}
+
+#define LOCK_TESTING_THREADS 3
+
+/* Per-thread arguments for the migration locking stress test. */
+struct locking_thread_input {
+ /* The VM this thread repeatedly migrates into. */
+ struct kvm_vm *vm;
+ /* fds of all the test VMs, shared by every thread as migration sources. */
+ int source_fds[LOCK_TESTING_THREADS];
+};
+
+/*
+ * Worker for the locking stress test: repeatedly attempt to migrate one of
+ * the shared source VMs' encryption contexts into this thread's VM.  With
+ * all threads hammering overlapping VMs, a lock-ordering bug in KVM would
+ * deadlock here.
+ */
+static void *locking_test_thread(void *arg)
+{
+	struct locking_thread_input *input = arg;
+	struct kvm_enable_cap cap = { 0 };
+	int i, j;
+
+	cap.cap = KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM;
+
+	for (i = 0; i < 1000; ++i) {
+		/*
+		 * Rotate through the source VMs.  Note the index is i modulo
+		 * the array size; the previous code indexed source_fds[] with
+		 * an fd value, reading out of bounds.
+		 */
+		j = i % LOCK_TESTING_THREADS;
+		cap.args[0] = input->source_fds[j];
+		/*
+		 * Call IOCTL directly without checking return code or using
+		 * the vm_enable_cap() wrapper (which asserts success).  We are
+		 * simply trying to confirm there is no deadlock from
+		 * userspace, not check correctness of migration here, and
+		 * contended migrations are expected to fail.
+		 */
+		ioctl(input->vm->fd, KVM_ENABLE_CAP, &cap);
+	}
+
+	return NULL;
+}
+
+/*
+ * Spawn several threads that concurrently migrate encryption contexts
+ * between an overlapping set of SEV VMs, to confirm no deadlock exists in
+ * KVM's intra-host migration locking.  The test passes if every thread can
+ * be joined.
+ */
+static void test_sev_migrate_locking(void)
+{
+	struct locking_thread_input input[LOCK_TESTING_THREADS];
+	pthread_t pt[LOCK_TESTING_THREADS];
+	int i, ret;
+
+	/* Create the VMs and collect all their fds in input[0]. */
+	for (i = 0; i < LOCK_TESTING_THREADS; ++i) {
+		input[i].vm = sev_vm_create(/* es= */ false);
+		input[0].source_fds[i] = input[i].vm->fd;
+	}
+	/* Give every thread the same view of all source fds. */
+	for (i = 1; i < LOCK_TESTING_THREADS; ++i)
+		memcpy(input[i].source_fds, input[0].source_fds,
+		       sizeof(input[i].source_fds));
+
+	for (i = 0; i < LOCK_TESTING_THREADS; ++i) {
+		/* pthread_create() returns an error number, not -1/errno. */
+		ret = pthread_create(&pt[i], NULL, locking_test_thread,
+				     &input[i]);
+		TEST_ASSERT(ret == 0, "pthread_create failed: %d", ret);
+	}
+
+	for (i = 0; i < LOCK_TESTING_THREADS; ++i) {
+		ret = pthread_join(pt[i], NULL);
+		TEST_ASSERT(ret == 0, "pthread_join failed: %d", ret);
+	}
+}
+
+/* Run SEV then SEV-ES intra-host migration, then the deadlock stress test. */
+int main(int argc, char *argv[])
+{
+ test_sev_migrate_from(/* es= */ false);
+ test_sev_migrate_from(/* es= */ true);
+ test_sev_migrate_locking();
+ return 0;
+}
--
2.33.0.259.gc128427fd7-goog
^ permalink raw reply related [flat|nested] 8+ messages in thread* [PATCH 3/3 V6] selftest: KVM: Add intra host migration
@ 2021-08-30 21:29 Peter Gonda
2021-08-31 13:24 ` Marc Orr
0 siblings, 1 reply; 8+ messages in thread
From: Peter Gonda @ 2021-08-30 21:29 UTC (permalink / raw)
To: kvm; +Cc: Peter Gonda, Sean Christopherson, Marc Orr, Brijesh Singh,
linux-kernel
Adds testcases for intra host migration for SEV and SEV-ES. Also adds
locking test to confirm no deadlock exists.
---
tools/testing/selftests/kvm/Makefile | 1 +
.../selftests/kvm/x86_64/sev_vm_tests.c | 152 ++++++++++++++++++
2 files changed, 153 insertions(+)
create mode 100644 tools/testing/selftests/kvm/x86_64/sev_vm_tests.c
Signed-off-by: Peter Gonda <pgonda@google.com>
Suggested-by: Sean Christopherson <seanjc@google.com>
Cc: Marc Orr <marcorr@google.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: kvm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 5832f510a16c..de6e64d5c9c4 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -71,6 +71,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
+TEST_GEN_PROGS_x86_64 += x86_64/sev_vm_tests
TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
TEST_GEN_PROGS_x86_64 += demand_paging_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
diff --git a/tools/testing/selftests/kvm/x86_64/sev_vm_tests.c b/tools/testing/selftests/kvm/x86_64/sev_vm_tests.c
new file mode 100644
index 000000000000..50a770316628
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/sev_vm_tests.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kvm.h>
+#include <linux/psp-sev.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <pthread.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "svm_util.h"
+#include "kvm_util.h"
+#include "kselftest.h"
+#include "../lib/kvm_util_internal.h"
+
+#define SEV_DEV_PATH "/dev/sev"
+
+/*
+ * Open SEV_DEV_PATH if available, otherwise exit the entire program.
+ *
+ * Input Args:
+ * flags - The flags to pass when opening SEV_DEV_PATH.
+ *
+ * Return:
+ * The opened file descriptor of /dev/sev.
+ */
+static int open_sev_dev_path_or_exit(int flags)
+{
+ static int fd;
+
+ if (fd != 0)
+ return fd;
+
+ fd = open(SEV_DEV_PATH, flags);
+ if (fd < 0) {
+ print_skip("%s not available, is SEV not enabled? (errno: %d)",
+ SEV_DEV_PATH, errno);
+ exit(KSFT_SKIP);
+ }
+
+ return fd;
+}
+
+static void sev_ioctl(int fd, int cmd_id, void *data)
+{
+ struct kvm_sev_cmd cmd = { 0 };
+ int ret;
+
+ TEST_ASSERT(cmd_id < KVM_SEV_NR_MAX, "Unknown SEV CMD : %d\n", cmd_id);
+
+ cmd.id = cmd_id;
+ cmd.sev_fd = open_sev_dev_path_or_exit(0);
+ cmd.data = (uint64_t)data;
+ ret = ioctl(fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
+ TEST_ASSERT((ret == 0 || cmd.error == SEV_RET_SUCCESS),
+ "%d failed: return code: %d, errno: %d, fw error: %d",
+ cmd_id, ret, errno, cmd.error);
+}
+
+static struct kvm_vm *sev_vm_create(bool es)
+{
+ struct kvm_vm *vm;
+ struct kvm_sev_launch_start start = { 0 };
+ int i;
+
+ vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
+ sev_ioctl(vm->fd, es ? KVM_SEV_ES_INIT : KVM_SEV_INIT, NULL);
+ for (i = 0; i < 3; ++i)
+ vm_vcpu_add(vm, i);
+ start.policy |= (es) << 2;
+ sev_ioctl(vm->fd, KVM_SEV_LAUNCH_START, &start);
+ if (es)
+ sev_ioctl(vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
+ return vm;
+}
+
+static void test_sev_migrate_from(bool es)
+{
+ struct kvm_vm *vms[3];
+ struct kvm_enable_cap cap = { 0 };
+ int i;
+
+ for (i = 0; i < sizeof(vms) / sizeof(struct kvm_vm *); ++i)
+ vms[i] = sev_vm_create(es);
+
+ cap.cap = KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM;
+ for (i = 0; i < sizeof(vms) / sizeof(struct kvm_vm *) - 1; ++i) {
+ cap.args[0] = vms[i]->fd;
+ vm_enable_cap(vms[i + 1], &cap);
+ }
+}
+
+#define LOCK_TESTING_THREADS 3
+
+struct locking_thread_input {
+ struct kvm_vm *vm;
+ int source_fds[LOCK_TESTING_THREADS];
+};
+
+static void *locking_test_thread(void *arg)
+{
+ struct kvm_enable_cap cap = { 0 };
+ int i, j;
+ struct locking_thread_input *input = (struct locking_test_thread *)arg;
+
+ cap.cap = KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM;
+
+ for (i = 0; i < 1000; ++i) {
+ j = input->source_fds[i % LOCK_TESTING_THREADS];
+ cap.args[0] = input->source_fds[j];
+ /*
+ * Call IOCTL directly without checking return code. We are
+ * simply trying to confirm there is no deadlock from userspace
+ * not check correctness of migration here.
+ */
+ ioctl(input->vm->fd, KVM_ENABLE_CAP, &cap);
+ }
+}
+
+static void test_sev_migrate_locking(void)
+{
+ struct locking_thread_input input[LOCK_TESTING_THREADS];
+ pthread_t pt[LOCK_TESTING_THREADS];
+ int i;
+
+ for (i = 0; i < LOCK_TESTING_THREADS; ++i) {
+ input[i].vm = sev_vm_create(/* es= */ false);
+ input[0].source_fds[i] = input[i].vm->fd;
+ }
+ memcpy(input[1].source_fds, input[0].source_fds,
+ sizeof(input[1].source_fds));
+ memcpy(input[2].source_fds, input[0].source_fds,
+ sizeof(input[2].source_fds));
+
+ for (i = 0; i < LOCK_TESTING_THREADS; ++i)
+ pthread_create(&pt[i], NULL, locking_test_thread, &input[i]);
+
+ for (i = 0; i < LOCK_TESTING_THREADS; ++i)
+ pthread_join(pt[i], NULL);
+}
+
+int main(int argc, char *argv[])
+{
+ test_sev_migrate_from(/* es= */ false);
+ test_sev_migrate_from(/* es= */ true);
+ test_sev_migrate_locking();
+ return 0;
+}
--
2.33.0.259.gc128427fd7-goog
^ permalink raw reply related [flat|nested] 8+ messages in thread* Re: [PATCH 3/3 V6] selftest: KVM: Add intra host migration
2021-08-30 21:29 Peter Gonda
@ 2021-08-31 13:24 ` Marc Orr
2021-08-31 13:25 ` Marc Orr
0 siblings, 1 reply; 8+ messages in thread
From: Marc Orr @ 2021-08-31 13:24 UTC (permalink / raw)
To: Peter Gonda; +Cc: kvm list, Sean Christopherson, Brijesh Singh, linux-kernel
On Mon, Aug 30, 2021 at 2:29 PM Peter Gonda <pgonda@google.com> wrote:
>
> Adds testcases for intra host migration for SEV and SEV-ES. Also adds
> locking test to confirm no deadlock exists.
>
> ---
> tools/testing/selftests/kvm/Makefile | 1 +
> .../selftests/kvm/x86_64/sev_vm_tests.c | 152 ++++++++++++++++++
> 2 files changed, 153 insertions(+)
> create mode 100644 tools/testing/selftests/kvm/x86_64/sev_vm_tests.c
>
> Signed-off-by: Peter Gonda <pgonda@google.com>
> Suggested-by: Sean Christopherson <seanjc@google.com>
> Cc: Marc Orr <marcorr@google.com>
> Cc: Sean Christopherson <seanjc@google.com>
> Cc: Brijesh Singh <brijesh.singh@amd.com>
> Cc: kvm@vger.kernel.org
> Cc: linux-kernel@vger.kernel.org
>
> diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
> index 5832f510a16c..de6e64d5c9c4 100644
> --- a/tools/testing/selftests/kvm/Makefile
> +++ b/tools/testing/selftests/kvm/Makefile
> @@ -71,6 +71,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test
> TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test
> TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
> TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
> +TEST_GEN_PROGS_x86_64 += x86_64/sev_vm_tests
> TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
> TEST_GEN_PROGS_x86_64 += demand_paging_test
> TEST_GEN_PROGS_x86_64 += dirty_log_test
> diff --git a/tools/testing/selftests/kvm/x86_64/sev_vm_tests.c b/tools/testing/selftests/kvm/x86_64/sev_vm_tests.c
> new file mode 100644
> index 000000000000..50a770316628
> --- /dev/null
> +++ b/tools/testing/selftests/kvm/x86_64/sev_vm_tests.c
> @@ -0,0 +1,150 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +#include <linux/kvm.h>
> +#include <linux/psp-sev.h>
> +#include <stdio.h>
> +#include <sys/ioctl.h>
> +#include <stdlib.h>
> +#include <errno.h>
> +#include <pthread.h>
> +
> +#include "test_util.h"
> +#include "kvm_util.h"
> +#include "processor.h"
> +#include "svm_util.h"
> +#include "kvm_util.h"
> +#include "kselftest.h"
> +#include "../lib/kvm_util_internal.h"
> +
> +#define SEV_DEV_PATH "/dev/sev"
> +
> +/*
> + * Open SEV_DEV_PATH if available, otherwise exit the entire program.
> + *
> + * Input Args:
> + * flags - The flags to pass when opening SEV_DEV_PATH.
> + *
> + * Return:
> + * The opened file descriptor of /dev/sev.
> + */
> +static int open_sev_dev_path_or_exit(int flags)
> +{
> + static int fd;
> +
> + if (fd != 0)
> + return fd;
> +
> + fd = open(SEV_DEV_PATH, flags);
> + if (fd < 0) {
> + print_skip("%s not available, is SEV not enabled? (errno: %d)",
> + SEV_DEV_PATH, errno);
> + exit(KSFT_SKIP);
> + }
> +
> + return fd;
> +}
> +
> +static void sev_ioctl(int fd, int cmd_id, void *data)
> +{
> + struct kvm_sev_cmd cmd = { 0 };
> + int ret;
> +
> + TEST_ASSERT(cmd_id < KVM_SEV_NR_MAX, "Unknown SEV CMD : %d\n", cmd_id);
> +
> + cmd.id = cmd_id;
> + cmd.sev_fd = open_sev_dev_path_or_exit(0);
> + cmd.data = (uint64_t)data;
> + ret = ioctl(fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
> + TEST_ASSERT((ret == 0 || cmd.error == SEV_RET_SUCCESS),
> + "%d failed: return code: %d, errno: %d, fw error: %d",
> + cmd_id, ret, errno, cmd.error);
> +}
nit: Since this function has two file descriptors, `fd` and
`cmd.sev_fd`, can we rename `fd` to `vm_fd`?
> +
> +static struct kvm_vm *sev_vm_create(bool es)
> +{
> + struct kvm_vm *vm;
> + struct kvm_sev_launch_start start = { 0 };
> + int i;
> +
> + vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
> + sev_ioctl(vm->fd, es ? KVM_SEV_ES_INIT : KVM_SEV_INIT, NULL);
> + for (i = 0; i < 3; ++i)
nit: Consider moving `3` to a macro, like `MAX_VCPU_IDX` or maybe
better defining something like `NUM_VCPUS` to be 4.
> + vm_vcpu_add(vm, i);
> + start.policy |= (es) << 2;
> + sev_ioctl(vm->fd, KVM_SEV_LAUNCH_START, &start);
> + if (es)
> + sev_ioctl(vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
> + return vm;
> +}
> +
> +static void test_sev_migrate_from(bool es)
> +{
> + struct kvm_vm *vms[3];
If we create a `NUM_VCPUS` macro, then we can use it here.
> + struct kvm_enable_cap cap = { 0 };
> + int i;
> +
> + for (i = 0; i < sizeof(vms) / sizeof(struct kvm_vm *); ++i)
> + vms[i] = sev_vm_create(es);
> +
> + cap.cap = KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM;
> + for (i = 0; i < sizeof(vms) / sizeof(struct kvm_vm *) - 1; ++i) {
> + cap.args[0] = vms[i]->fd;
> + vm_enable_cap(vms[i + 1], &cap);
> + }
nit/optional: To me, the code would be more clear if we combined this
loop with the one above and guarded calling `vm_enable_cap()` with `if
(i > 0)`. Also, maybe we can initialize `cap` when it's declared.
struct kvm_enable_cap cap = { .cap = KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM };
int i;
for (i = 0; i < sizeof(vms) / sizeof(struct kvm_vm *); ++i) {
vms[i] = sev_vm_create(es);
if (i > 0)
vm_enable_cap(vms[i], &cap);
}
> +}
> +
> +#define LOCK_TESTING_THREADS 3
nit: Consider moving this macro to the top of the file.
> +
> +struct locking_thread_input {
> + struct kvm_vm *vm;
> + int source_fds[LOCK_TESTING_THREADS];
> +};
> +
> +static void *locking_test_thread(void *arg)
> +{
> + struct kvm_enable_cap cap = { 0 };
Maybe:
struct kvm_enable_cap cap = { .cap = KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM };
> + int i, j;
> + struct locking_thread_input *input = (struct locking_test_thread *)arg;
> +
> + cap.cap = KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM;
If we initialize the cap field during the declaration, then this line goes away.
> +
> + for (i = 0; i < 1000; ++i) {
> + j = input->source_fds[i % LOCK_TESTING_THREADS];
> + cap.args[0] = input->source_fds[j];
> + /*
> + * Call IOCTL directly without checking return code. We are
> + * simply trying to confirm there is no deadlock from userspace
> + * not check correctness of migration here.
> + */
> + ioctl(input->vm->fd, KVM_ENABLE_CAP, &cap);
Should we use `vm_enable_cap()` here?
> + }
> +}
> +
> +static void test_sev_migrate_locking(void)
> +{
> + struct locking_thread_input input[LOCK_TESTING_THREADS];
> + pthread_t pt[LOCK_TESTING_THREADS];
> + int i;
> +
> + for (i = 0; i < LOCK_TESTING_THREADS; ++i) {
> + input[i].vm = sev_vm_create(/* es= */ false);
> + input[0].source_fds[i] = input[i].vm->fd;
> + }
> + memcpy(input[1].source_fds, input[0].source_fds,
> + sizeof(input[1].source_fds));
> + memcpy(input[2].source_fds, input[0].source_fds,
> + sizeof(input[2].source_fds));
> +
> + for (i = 0; i < LOCK_TESTING_THREADS; ++i)
> + pthread_create(&pt[i], NULL, locking_test_thread, &input[i]);
> +
> + for (i = 0; i < LOCK_TESTING_THREADS; ++i)
> + pthread_join(pt[i], NULL);
> +}
I think this function/test case deserves a comment to capture some of
the conversation we had on the list that led to Sean suggesting this
test case. Speaking of which, should this test case have a
Suggested-by tag for Sean, since he suggested this test?
> +
> +int main(int argc, char *argv[])
> +{
> + test_sev_migrate_from(/* es= */ false);
> + test_sev_migrate_from(/* es= */ true);
> + test_sev_migrate_locking();
> + return 0;
> +}
> --
> 2.33.0.259.gc128427fd7-goog
>
Nice test!
^ permalink raw reply [flat|nested] 8+ messages in thread* Re: [PATCH 3/3 V6] selftest: KVM: Add intra host migration
2021-08-31 13:24 ` Marc Orr
@ 2021-08-31 13:25 ` Marc Orr
2021-08-31 15:03 ` Peter Gonda
0 siblings, 1 reply; 8+ messages in thread
From: Marc Orr @ 2021-08-31 13:25 UTC (permalink / raw)
To: Peter Gonda; +Cc: kvm list, Sean Christopherson, Brijesh Singh, linux-kernel
On Tue, Aug 31, 2021 at 6:24 AM Marc Orr <marcorr@google.com> wrote:
>
> On Mon, Aug 30, 2021 at 2:29 PM Peter Gonda <pgonda@google.com> wrote:
> >
> > Adds testcases for intra host migration for SEV and SEV-ES. Also adds
> > locking test to confirm no deadlock exists.
> >
> > ---
> > tools/testing/selftests/kvm/Makefile | 1 +
> > .../selftests/kvm/x86_64/sev_vm_tests.c | 152 ++++++++++++++++++
> > 2 files changed, 153 insertions(+)
> > create mode 100644 tools/testing/selftests/kvm/x86_64/sev_vm_tests.c
> >
> > Signed-off-by: Peter Gonda <pgonda@google.com>
> > Suggested-by: Sean Christopherson <seanjc@google.com>
> > Cc: Marc Orr <marcorr@google.com>
> > Cc: Sean Christopherson <seanjc@google.com>
> > Cc: Brijesh Singh <brijesh.singh@amd.com>
> > Cc: kvm@vger.kernel.org
> > Cc: linux-kernel@vger.kernel.org
> >
> > diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
> > index 5832f510a16c..de6e64d5c9c4 100644
> > --- a/tools/testing/selftests/kvm/Makefile
> > +++ b/tools/testing/selftests/kvm/Makefile
> > @@ -71,6 +71,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test
> > TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test
> > TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
> > TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
> > +TEST_GEN_PROGS_x86_64 += x86_64/sev_vm_tests
> > TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
> > TEST_GEN_PROGS_x86_64 += demand_paging_test
> > TEST_GEN_PROGS_x86_64 += dirty_log_test
> > diff --git a/tools/testing/selftests/kvm/x86_64/sev_vm_tests.c b/tools/testing/selftests/kvm/x86_64/sev_vm_tests.c
> > new file mode 100644
> > index 000000000000..50a770316628
> > --- /dev/null
> > +++ b/tools/testing/selftests/kvm/x86_64/sev_vm_tests.c
> > @@ -0,0 +1,150 @@
> > +// SPDX-License-Identifier: GPL-2.0-only
> > +#include <linux/kvm.h>
> > +#include <linux/psp-sev.h>
> > +#include <stdio.h>
> > +#include <sys/ioctl.h>
> > +#include <stdlib.h>
> > +#include <errno.h>
> > +#include <pthread.h>
> > +
> > +#include "test_util.h"
> > +#include "kvm_util.h"
> > +#include "processor.h"
> > +#include "svm_util.h"
> > +#include "kvm_util.h"
> > +#include "kselftest.h"
> > +#include "../lib/kvm_util_internal.h"
> > +
> > +#define SEV_DEV_PATH "/dev/sev"
> > +
> > +/*
> > + * Open SEV_DEV_PATH if available, otherwise exit the entire program.
> > + *
> > + * Input Args:
> > + * flags - The flags to pass when opening SEV_DEV_PATH.
> > + *
> > + * Return:
> > + * The opened file descriptor of /dev/sev.
> > + */
> > +static int open_sev_dev_path_or_exit(int flags)
> > +{
> > + static int fd;
> > +
> > + if (fd != 0)
> > + return fd;
> > +
> > + fd = open(SEV_DEV_PATH, flags);
> > + if (fd < 0) {
> > + print_skip("%s not available, is SEV not enabled? (errno: %d)",
> > + SEV_DEV_PATH, errno);
> > + exit(KSFT_SKIP);
> > + }
> > +
> > + return fd;
> > +}
> > +
> > +static void sev_ioctl(int fd, int cmd_id, void *data)
> > +{
> > + struct kvm_sev_cmd cmd = { 0 };
> > + int ret;
> > +
> > + TEST_ASSERT(cmd_id < KVM_SEV_NR_MAX, "Unknown SEV CMD : %d\n", cmd_id);
> > +
> > + cmd.id = cmd_id;
> > + cmd.sev_fd = open_sev_dev_path_or_exit(0);
> > + cmd.data = (uint64_t)data;
> > + ret = ioctl(fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
> > + TEST_ASSERT((ret == 0 || cmd.error == SEV_RET_SUCCESS),
> > + "%d failed: return code: %d, errno: %d, fw error: %d",
> > + cmd_id, ret, errno, cmd.error);
> > +}
>
> nit: Since this function has two file descriptors, `fd` and
> `cmd.sev_fd`, can we rename `fd` to `vm_fd`?
>
> > +
> > +static struct kvm_vm *sev_vm_create(bool es)
> > +{
> > + struct kvm_vm *vm;
> > + struct kvm_sev_launch_start start = { 0 };
> > + int i;
> > +
> > + vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
> > + sev_ioctl(vm->fd, es ? KVM_SEV_ES_INIT : KVM_SEV_INIT, NULL);
> > + for (i = 0; i < 3; ++i)
>
> nit: Consider moving `3` to a macro, like `MAX_VCPU_IDX` or maybe
> better defining something like `NUM_VCPUS` to be 4.
>
> > + vm_vcpu_add(vm, i);
> > + start.policy |= (es) << 2;
> > + sev_ioctl(vm->fd, KVM_SEV_LAUNCH_START, &start);
> > + if (es)
> > + sev_ioctl(vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
> > + return vm;
> > +}
> > +
> > +static void test_sev_migrate_from(bool es)
> > +{
> > + struct kvm_vm *vms[3];
>
> If we create a `NUM_VCPUS` macro, then we can use it here.
>
> > + struct kvm_enable_cap cap = { 0 };
> > + int i;
> > +
> > + for (i = 0; i < sizeof(vms) / sizeof(struct kvm_vm *); ++i)
> > + vms[i] = sev_vm_create(es);
> > +
> > + cap.cap = KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM;
> > + for (i = 0; i < sizeof(vms) / sizeof(struct kvm_vm *) - 1; ++i) {
> > + cap.args[0] = vms[i]->fd;
> > + vm_enable_cap(vms[i + 1], &cap);
> > + }
>
> nit/optional: To me, the code would be more clear if we combined this
> loop with the one above and guarded calling `vm_enable_cap()` with `if
> (i > 0)`. Also, maybe we can initialize `cap` when it's declared.
>
> struct kvm_enable_cap cap = { .cap = KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM };
> int i;
>
> for (i = 0; i < sizeof(vms) / sizeof(struct kvm_vm *); ++i) {
> vms[i] = sev_vm_create(es);
> if (i > 0)
> vm_enable_cap(vms[i], &cap);
> }
>
> > +}
> > +
> > +#define LOCK_TESTING_THREADS 3
>
> nit: Consider moving this macro to the top of the file.
>
> > +
> > +struct locking_thread_input {
> > + struct kvm_vm *vm;
> > + int source_fds[LOCK_TESTING_THREADS];
> > +};
> > +
> > +static void *locking_test_thread(void *arg)
> > +{
> > + struct kvm_enable_cap cap = { 0 };
>
> Maybe:
> struct kvm_enable_cap cap = { .cap = KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM };
>
> > + int i, j;
> > + struct locking_thread_input *input = (struct locking_test_thread *)arg;
> > +
> > + cap.cap = KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM;
>
> If we initialize the cap field during the declaration, then this line goes away.
>
> > +
> > + for (i = 0; i < 1000; ++i) {
> > + j = input->source_fds[i % LOCK_TESTING_THREADS];
> > + cap.args[0] = input->source_fds[j];
> > + /*
> > + * Call IOCTL directly without checking return code. We are
> > + * simply trying to confirm there is no deadlock from userspace
> > + * not check correctness of migration here.
> > + */
> > + ioctl(input->vm->fd, KVM_ENABLE_CAP, &cap);
>
> Should we use `vm_enable_cap()` here?
>
> > + }
> > +}
> > +
> > +static void test_sev_migrate_locking(void)
> > +{
> > + struct locking_thread_input input[LOCK_TESTING_THREADS];
> > + pthread_t pt[LOCK_TESTING_THREADS];
> > + int i;
> > +
> > + for (i = 0; i < LOCK_TESTING_THREADS; ++i) {
> > + input[i].vm = sev_vm_create(/* es= */ false);
> > + input[0].source_fds[i] = input[i].vm->fd;
> > + }
> > + memcpy(input[1].source_fds, input[0].source_fds,
> > + sizeof(input[1].source_fds));
> > + memcpy(input[2].source_fds, input[0].source_fds,
> > + sizeof(input[2].source_fds));
> > +
> > + for (i = 0; i < LOCK_TESTING_THREADS; ++i)
> > + pthread_create(&pt[i], NULL, locking_test_thread, &input[i]);
> > +
> > + for (i = 0; i < LOCK_TESTING_THREADS; ++i)
> > + pthread_join(pt[i], NULL);
> > +}
>
> I think this function/test case deserves a comment to capture some of
> the conversation we had on the list that led to Sean suggesting this
> test case. Speaking of which, should this test case have a
> Suggested-by tag for Sean, since he suggested this test?
Gah. I forgot to check the tags before sending my feedback. Of course,
the suggested-by tag is already there. Second time I made this gaffe
in the last couple of weeks. Sorry for the noise.
>
> > +
> > +int main(int argc, char *argv[])
> > +{
> > + test_sev_migrate_from(/* es= */ false);
> > + test_sev_migrate_from(/* es= */ true);
> > + test_sev_migrate_locking();
> > + return 0;
> > +}
> > --
> > 2.33.0.259.gc128427fd7-goog
> >
>
> Nice test!
^ permalink raw reply [flat|nested] 8+ messages in thread* Re: [PATCH 3/3 V6] selftest: KVM: Add intra host migration
2021-08-31 13:25 ` Marc Orr
@ 2021-08-31 15:03 ` Peter Gonda
2021-08-31 15:20 ` Marc Orr
0 siblings, 1 reply; 8+ messages in thread
From: Peter Gonda @ 2021-08-31 15:03 UTC (permalink / raw)
To: Marc Orr; +Cc: kvm list, Sean Christopherson, Brijesh Singh, linux-kernel
On Tue, Aug 31, 2021 at 7:26 AM Marc Orr <marcorr@google.com> wrote:
>
> On Tue, Aug 31, 2021 at 6:24 AM Marc Orr <marcorr@google.com> wrote:
> >
> > On Mon, Aug 30, 2021 at 2:29 PM Peter Gonda <pgonda@google.com> wrote:
> > >
> > > Adds testcases for intra host migration for SEV and SEV-ES. Also adds
> > > locking test to confirm no deadlock exists.
> > >
> > > ---
> > > tools/testing/selftests/kvm/Makefile | 1 +
> > > .../selftests/kvm/x86_64/sev_vm_tests.c | 152 ++++++++++++++++++
> > > 2 files changed, 153 insertions(+)
> > > create mode 100644 tools/testing/selftests/kvm/x86_64/sev_vm_tests.c
> > >
> > > Signed-off-by: Peter Gonda <pgonda@google.com>
> > > Suggested-by: Sean Christopherson <seanjc@google.com>
> > > Cc: Marc Orr <marcorr@google.com>
> > > Cc: Sean Christopherson <seanjc@google.com>
> > > Cc: Brijesh Singh <brijesh.singh@amd.com>
> > > Cc: kvm@vger.kernel.org
> > > Cc: linux-kernel@vger.kernel.org
> > >
> > > diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
> > > index 5832f510a16c..de6e64d5c9c4 100644
> > > --- a/tools/testing/selftests/kvm/Makefile
> > > +++ b/tools/testing/selftests/kvm/Makefile
> > > @@ -71,6 +71,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test
> > > TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test
> > > TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
> > > TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
> > > +TEST_GEN_PROGS_x86_64 += x86_64/sev_vm_tests
> > > TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
> > > TEST_GEN_PROGS_x86_64 += demand_paging_test
> > > TEST_GEN_PROGS_x86_64 += dirty_log_test
> > > diff --git a/tools/testing/selftests/kvm/x86_64/sev_vm_tests.c b/tools/testing/selftests/kvm/x86_64/sev_vm_tests.c
> > > new file mode 100644
> > > index 000000000000..50a770316628
> > > --- /dev/null
> > > +++ b/tools/testing/selftests/kvm/x86_64/sev_vm_tests.c
> > > @@ -0,0 +1,150 @@
> > > +// SPDX-License-Identifier: GPL-2.0-only
> > > +#include <linux/kvm.h>
> > > +#include <linux/psp-sev.h>
> > > +#include <stdio.h>
> > > +#include <sys/ioctl.h>
> > > +#include <stdlib.h>
> > > +#include <errno.h>
> > > +#include <pthread.h>
> > > +
> > > +#include "test_util.h"
> > > +#include "kvm_util.h"
> > > +#include "processor.h"
> > > +#include "svm_util.h"
> > > +#include "kvm_util.h"
> > > +#include "kselftest.h"
> > > +#include "../lib/kvm_util_internal.h"
> > > +
> > > +#define SEV_DEV_PATH "/dev/sev"
> > > +
> > > +/*
> > > + * Open SEV_DEV_PATH if available, otherwise exit the entire program.
> > > + *
> > > + * Input Args:
> > > + * flags - The flags to pass when opening SEV_DEV_PATH.
> > > + *
> > > + * Return:
> > > + * The opened file descriptor of /dev/sev.
> > > + */
> > > +static int open_sev_dev_path_or_exit(int flags)
> > > +{
> > > + static int fd;
> > > +
> > > + if (fd != 0)
> > > + return fd;
> > > +
> > > + fd = open(SEV_DEV_PATH, flags);
> > > + if (fd < 0) {
> > > + print_skip("%s not available, is SEV not enabled? (errno: %d)",
> > > + SEV_DEV_PATH, errno);
> > > + exit(KSFT_SKIP);
> > > + }
> > > +
> > > + return fd;
> > > +}
> > > +
> > > +static void sev_ioctl(int fd, int cmd_id, void *data)
> > > +{
> > > + struct kvm_sev_cmd cmd = { 0 };
> > > + int ret;
> > > +
> > > + TEST_ASSERT(cmd_id < KVM_SEV_NR_MAX, "Unknown SEV CMD : %d\n", cmd_id);
> > > +
> > > + cmd.id = cmd_id;
> > > + cmd.sev_fd = open_sev_dev_path_or_exit(0);
> > > + cmd.data = (uint64_t)data;
> > > + ret = ioctl(fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
> > > + TEST_ASSERT((ret == 0 || cmd.error == SEV_RET_SUCCESS),
> > > + "%d failed: return code: %d, errno: %d, fw error: %d",
> > > + cmd_id, ret, errno, cmd.error);
> > > +}
> >
> > nit: Since this function has two file descriptors, `fd` and
> > `cmd.sev_fd`, can we rename `fd` to `vm_fd`?
> >
> > > +
> > > +static struct kvm_vm *sev_vm_create(bool es)
> > > +{
> > > + struct kvm_vm *vm;
> > > + struct kvm_sev_launch_start start = { 0 };
> > > + int i;
> > > +
> > > + vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
> > > + sev_ioctl(vm->fd, es ? KVM_SEV_ES_INIT : KVM_SEV_INIT, NULL);
> > > + for (i = 0; i < 3; ++i)
> >
> > nit: Consider moving `3` to a macro, like `MAX_VCPU_IDX` or maybe
> > better defining something like `NUM_VCPUS` to be 4.
> >
> > > + vm_vcpu_add(vm, i);
> > > + start.policy |= (es) << 2;
> > > + sev_ioctl(vm->fd, KVM_SEV_LAUNCH_START, &start);
> > > + if (es)
> > > + sev_ioctl(vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
> > > + return vm;
> > > +}
> > > +
> > > +static void test_sev_migrate_from(bool es)
> > > +{
> > > + struct kvm_vm *vms[3];
> >
> > If we create a `NUM_VCPUS` macro, then we can use it here.
> >
> > > + struct kvm_enable_cap cap = { 0 };
> > > + int i;
> > > +
> > > + for (i = 0; i < sizeof(vms) / sizeof(struct kvm_vm *); ++i)
> > > + vms[i] = sev_vm_create(es);
> > > +
> > > + cap.cap = KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM;
> > > + for (i = 0; i < sizeof(vms) / sizeof(struct kvm_vm *) - 1; ++i) {
> > > + cap.args[0] = vms[i]->fd;
> > > + vm_enable_cap(vms[i + 1], &cap);
> > > + }
> >
> > nit/optional: To me, the code would be more clear if we combined this
> > loop with the one above and guarded calling `vm_enable_cap()` with `if
> > (i > 0)`. Also, maybe we can initialize `cap` when it's declared.
> >
> > struct kvm_enable_cap cap = { .cap = KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM };
> > int i;
> >
> > for (i = 0; i < sizeof(vms) / sizeof(struct kvm_vm *); ++i) {
> > vms[i] = sev_vm_create(es);
> > if (i > 0)
> > vm_enable_cap(vms[i], &cap);
> > }
> >
> > > +}
> > > +
> > > +#define LOCK_TESTING_THREADS 3
> >
> > nit: Consider moving this macro to the top of the file.
> >
> > > +
> > > +struct locking_thread_input {
> > > + struct kvm_vm *vm;
> > > + int source_fds[LOCK_TESTING_THREADS];
> > > +};
> > > +
> > > +static void *locking_test_thread(void *arg)
> > > +{
> > > + struct kvm_enable_cap cap = { 0 };
> >
> > Maybe:
> > struct kvm_enable_cap cap = { .cap = KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM };
> >
> > > + int i, j;
> > > + struct locking_thread_input *input = (struct locking_test_thread *)arg;
> > > +
> > > + cap.cap = KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM;
> >
> > If we initialize the cap field during the declaration, then this line goes away.
> >
> > > +
> > > + for (i = 0; i < 1000; ++i) {
> > > + j = input->source_fds[i % LOCK_TESTING_THREADS];
> > > + cap.args[0] = input->source_fds[j];
> > > + /*
> > > + * Call IOCTL directly without checking return code. We are
> > > + * simply trying to confirm there is no deadlock from userspace
> > > + * not check correctness of migration here.
> > > + */
> > > + ioctl(input->vm->fd, KVM_ENABLE_CAP, &cap);
> >
> > Should we use `vm_enable_cap()` here?
> >
I took all other suggestions but this one I just updated the comment.
vm_enable_cap TEST_ASSERT()s if the ioctl fails. Since we are just
randomly iterating through a bunch of ioclts we fail often so this
would kill the test.
> > > + }
> > > +}
> > > +
> > > +static void test_sev_migrate_locking(void)
> > > +{
> > > + struct locking_thread_input input[LOCK_TESTING_THREADS];
> > > + pthread_t pt[LOCK_TESTING_THREADS];
> > > + int i;
> > > +
> > > + for (i = 0; i < LOCK_TESTING_THREADS; ++i) {
> > > + input[i].vm = sev_vm_create(/* es= */ false);
> > > + input[0].source_fds[i] = input[i].vm->fd;
> > > + }
> > > + memcpy(input[1].source_fds, input[0].source_fds,
> > > + sizeof(input[1].source_fds));
> > > + memcpy(input[2].source_fds, input[0].source_fds,
> > > + sizeof(input[2].source_fds));
> > > +
> > > + for (i = 0; i < LOCK_TESTING_THREADS; ++i)
> > > + pthread_create(&pt[i], NULL, locking_test_thread, &input[i]);
> > > +
> > > + for (i = 0; i < LOCK_TESTING_THREADS; ++i)
> > > + pthread_join(pt[i], NULL);
> > > +}
> >
> > I think this function/test case deserves a comment to capture some of
> > the conversation we had on the list that led to Sean suggesting this
> > test case. Speaking of which, should this test case have a
> > Suggested-by tag for Sean, since he suggested this test?
>
> Gah. I forgot to check the tags before sending my feedback. Of course,
> the suggested-by tag is already there. Second time I made this gaffe
> in the last couple of weeks. Sorry for the noise.
>
> >
> > > +
> > > +int main(int argc, char *argv[])
> > > +{
> > > + test_sev_migrate_from(/* es= */ false);
> > > + test_sev_migrate_from(/* es= */ true);
> > > + test_sev_migrate_locking();
> > > + return 0;
> > > +}
> > > --
> > > 2.33.0.259.gc128427fd7-goog
> > >
> >
> > Nice test!
^ permalink raw reply [flat|nested] 8+ messages in thread* Re: [PATCH 3/3 V6] selftest: KVM: Add intra host migration
2021-08-31 15:03 ` Peter Gonda
@ 2021-08-31 15:20 ` Marc Orr
0 siblings, 0 replies; 8+ messages in thread
From: Marc Orr @ 2021-08-31 15:20 UTC (permalink / raw)
To: Peter Gonda; +Cc: kvm list, Sean Christopherson, Brijesh Singh, linux-kernel
> > > > +
> > > > + for (i = 0; i < 1000; ++i) {
> > > > + j = input->source_fds[i % LOCK_TESTING_THREADS];
> > > > + cap.args[0] = input->source_fds[j];
> > > > + /*
> > > > + * Call IOCTL directly without checking return code. We are
> > > > + * simply trying to confirm there is no deadlock from userspace
> > > > + * not check correctness of migration here.
> > > > + */
> > > > + ioctl(input->vm->fd, KVM_ENABLE_CAP, &cap);
> > >
> > > Should we use `vm_enable_cap()` here?
> > >
>
> I took all other suggestions but this one I just updated the comment.
> vm_enable_cap TEST_ASSERT()s if the ioctl fails. Since we are just
> randomly iterating through a bunch of ioctls, we fail often, so this
> would kill the test.
Gah, of course! My bad. Anyway, sounds good and thanks!
^ permalink raw reply [flat|nested] 8+ messages in thread
* [PATCH 0/2 V6] Add AMD SEV and SEV-ES intra host migration support
@ 2021-08-30 20:57 Peter Gonda
2021-08-30 20:57 ` [PATCH 3/3 V6] selftest: KVM: Add intra host migration Peter Gonda
0 siblings, 1 reply; 8+ messages in thread
From: Peter Gonda @ 2021-08-30 20:57 UTC (permalink / raw)
To: kvm
Cc: Peter Gonda, Paolo Bonzini, Sean Christopherson, David Rientjes,
Dr . David Alan Gilbert, Brijesh Singh, Vitaly Kuznetsov,
Wanpeng Li, Jim Mattson, Joerg Roedel, Thomas Gleixner,
Ingo Molnar, Borislav Petkov, H. Peter Anvin, linux-kernel
Intra host migration provides a low-cost mechanism for userspace VMM
upgrades. It is an alternative to traditional (i.e., remote) live
migration. Whereas remote migration handles moving a guest to a new host,
intra host migration only handles moving a guest to a new userspace VMM
within a host. This can be used to update, rollback, change flags of the
VMM, etc. The lower cost compared to live migration comes from the fact
that the guest's memory does not need to be copied between processes. A
handle to the guest memory simply gets passed to the new VMM, this could
be done via /dev/shm with share=on or similar feature.
The guest state can be transferred from an old VMM to a new VMM as follows:
1. Export guest state from KVM to the old user-space VMM via a getter
user-space/kernel API. 2. Transfer guest state from the old VMM to the new
VMM via IPC communication. 3. Import guest state into KVM from the new
user-space VMM via a setter user-space/kernel API. In short, state moves
between VMMs by exporting from KVM using getters, sending that data to the
new VMM, then setting it again in KVM.
In the common case for intra host migration, we can rely on the normal
ioctls for passing data from one VMM to the next. SEV, SEV-ES, and other
confidential compute environments make most of this information opaque, and
render KVM ioctls such as "KVM_GET_REGS" irrelevant. As a result, we need
the ability to pass this opaque metadata from one VMM to the next. The
easiest way to do this is to leave this data in the kernel, and transfer
ownership of the metadata from one KVM VM (or vCPU) to the next. For
example, we need to move the SEV enabled ASID, VMSAs, and GHCB metadata
from one VMM to the next. In general, we need to be able to hand off any
data that would be unsafe/impossible for the kernel to hand directly to
userspace (and cannot be reproduced using data that can be handed safely to
userspace).
For the intra host operation the SEV required metadata, the source VM FD is
sent to the target VMM. The target VMM calls the new cap ioctl with the
source VM FD, KVM then moves all the SEV state to the target VM from the
source VM.
V6
* Add selftest.
V5:
* Fix up locking scheme
* Address marcorr@ comments.
V4:
* Move to seanjc@'s suggestion of source VM FD based single ioctl design.
v3:
* Fix memory leak found by dan.carpenter@
v2:
* Added marcorr@ reviewed by tag
* Renamed function introduced in 1/3
* Edited with seanjc@'s review comments
** Cleaned up WARN usage
** Userspace makes random token now
* Edited with brijesh.singh@'s review comments
** Checks for different LAUNCH_* states in send function
v1: https://lore.kernel.org/kvm/20210621163118.1040170-1-pgonda@google.com/
Peter Gonda (2):
KVM, SEV: Add support for SEV intra host migration
KVM, SEV: Add support for SEV-ES intra host migration
Documentation/virt/kvm/api.rst | 15 +++
arch/x86/include/asm/kvm_host.h | 1 +
arch/x86/kvm/svm/sev.c | 157 ++++++++++++++++++++++++++++++++
arch/x86/kvm/svm/svm.c | 1 +
arch/x86/kvm/svm/svm.h | 2 +
arch/x86/kvm/x86.c | 5 +
include/uapi/linux/kvm.h | 1 +
7 files changed, 182 insertions(+)
base-commit: a3e0b8bd99ab
Peter Gonda (3):
KVM, SEV: Add support for SEV intra host migration
KVM, SEV: Add support for SEV-ES intra host migration
selftesting
Documentation/virt/kvm/api.rst | 15 ++
arch/x86/include/asm/kvm_host.h | 1 +
arch/x86/kvm/svm/sev.c | 157 ++++++++++++++++++
arch/x86/kvm/svm/svm.c | 1 +
arch/x86/kvm/svm/svm.h | 2 +
arch/x86/kvm/x86.c | 5 +
include/uapi/linux/kvm.h | 1 +
tools/testing/selftests/kvm/Makefile | 1 +
.../selftests/kvm/x86_64/sev_vm_tests.c | 152 +++++++++++++++++
9 files changed, 335 insertions(+)
create mode 100644 tools/testing/selftests/kvm/x86_64/sev_vm_tests.c
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Dr. David Alan Gilbert <dgilbert@redhat.com>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: Wanpeng Li <wanpengli@tencent.com>
Cc: Jim Mattson <jmattson@google.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: kvm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
--
2.33.0.259.gc128427fd7-goog
^ permalink raw reply [flat|nested] 8+ messages in thread
* [PATCH 3/3 V6] selftest: KVM: Add intra host migration
2021-08-30 20:57 [PATCH 0/2 V6] Add AMD SEV and SEV-ES intra host migration support Peter Gonda
@ 2021-08-30 20:57 ` Peter Gonda
2021-09-01 14:53 ` kernel test robot
0 siblings, 1 reply; 8+ messages in thread
From: Peter Gonda @ 2021-08-30 20:57 UTC (permalink / raw)
To: kvm; +Cc: Peter Gonda
Adds testcases for intra host migration for SEV and SEV-ES. Also adds
locking test to confirm no deadlock exists.
---
tools/testing/selftests/kvm/Makefile | 1 +
.../selftests/kvm/x86_64/sev_vm_tests.c | 152 ++++++++++++++++++
2 files changed, 153 insertions(+)
create mode 100644 tools/testing/selftests/kvm/x86_64/sev_vm_tests.c
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 5832f510a16c..de6e64d5c9c4 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -71,6 +71,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
+TEST_GEN_PROGS_x86_64 += x86_64/sev_vm_tests
TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
TEST_GEN_PROGS_x86_64 += demand_paging_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
diff --git a/tools/testing/selftests/kvm/x86_64/sev_vm_tests.c b/tools/testing/selftests/kvm/x86_64/sev_vm_tests.c
new file mode 100644
index 000000000000..50a770316628
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/sev_vm_tests.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kvm.h>
+#include <linux/psp-sev.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <pthread.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "svm_util.h"
+#include "kvm_util.h"
+#include "kselftest.h"
+#include "../lib/kvm_util_internal.h"
+
+#define SEV_DEV_PATH "/dev/sev"
+
+/*
+ * Open SEV_DEV_PATH if available, otherwise exit the entire program.
+ *
+ * Input Args:
+ * flags - The flags to pass when opening SEV_DEV_PATH.
+ *
+ * Return:
+ * The opened file descriptor of /dev/sev.
+ */
+static int open_sev_dev_path_or_exit(int flags)
+{
+ /*
+ * Cached across calls; 0 doubles as "not opened yet". Note that only
+ * the first caller's 'flags' are honored — every later call returns
+ * the cached fd regardless of the flags it passes.
+ */
+ static int fd;
+
+ if (fd != 0)
+ return fd;
+
+ /* No /dev/sev means the host cannot run SEV guests: skip, not fail. */
+ fd = open(SEV_DEV_PATH, flags);
+ if (fd < 0) {
+ print_skip("%s not available, is SEV not enabled? (errno: %d)",
+ SEV_DEV_PATH, errno);
+ exit(KSFT_SKIP);
+ }
+
+ return fd;
+}
+
+/*
+ * Issue a SEV command against VM fd 'fd' via KVM_MEMORY_ENCRYPT_OP and
+ * assert it fully succeeded: both the ioctl return code and the firmware
+ * status word must report success.
+ */
+static void sev_ioctl(int fd, int cmd_id, void *data)
+{
+	struct kvm_sev_cmd cmd = { 0 };
+	int ret;
+
+	TEST_ASSERT(cmd_id < KVM_SEV_NR_MAX, "Unknown SEV CMD : %d\n", cmd_id);
+
+	cmd.id = cmd_id;
+	cmd.sev_fd = open_sev_dev_path_or_exit(0);
+	cmd.data = (uint64_t)data;
+	ret = ioctl(fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
+	/*
+	 * Require BOTH conditions ('&&', not '||'): with '||' a failed ioctl
+	 * passes whenever cmd.error still reads SEV_RET_SUCCESS, and a
+	 * firmware error passes whenever ret == 0 — either way masking a
+	 * real failure.
+	 */
+	TEST_ASSERT(ret == 0 && cmd.error == SEV_RET_SUCCESS,
+		    "%d failed: return code: %d, errno: %d, fw error: %d",
+		    cmd_id, ret, errno, cmd.error);
+}
+
+/*
+ * Create a VM with 3 vCPUs and take it through the start of the SEV (or
+ * SEV-ES, when 'es' is true) launch flow: INIT, LAUNCH_START, and for ES
+ * guests LAUNCH_UPDATE_VMSA. Returns the prepared VM; ownership passes to
+ * the caller.
+ */
+static struct kvm_vm *sev_vm_create(bool es)
+{
+ struct kvm_vm *vm;
+ struct kvm_sev_launch_start start = { 0 };
+ int i;
+
+ vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
+ /* INIT must precede vCPU-related SEV ops. */
+ sev_ioctl(vm->fd, es ? KVM_SEV_ES_INIT : KVM_SEV_INIT, NULL);
+ for (i = 0; i < 3; ++i)
+ vm_vcpu_add(vm, i);
+ /* Bit 2 of the guest policy is presumably the SEV-ES-required bit per the SEV API — TODO confirm against the spec. */
+ start.policy |= (es) << 2;
+ sev_ioctl(vm->fd, KVM_SEV_LAUNCH_START, &start);
+ if (es)
+ /* ES guests additionally need their VMSAs measured/encrypted. */
+ sev_ioctl(vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
+ return vm;
+}
+
+/*
+ * Create a chain of SEV (or SEV-ES) VMs and migrate the encryption
+ * context down the chain: each VM adopts the context of its predecessor
+ * via KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM.
+ */
+static void test_sev_migrate_from(bool es)
+{
+	struct kvm_vm *vms[3];
+	struct kvm_enable_cap cap = { 0 };
+	const size_t nr_vms = sizeof(vms) / sizeof(vms[0]);
+	size_t idx;
+
+	for (idx = 0; idx < nr_vms; ++idx)
+		vms[idx] = sev_vm_create(es);
+
+	cap.cap = KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM;
+	for (idx = 1; idx < nr_vms; ++idx) {
+		cap.args[0] = vms[idx - 1]->fd;
+		vm_enable_cap(vms[idx], &cap);
+	}
+}
+
+/* Number of concurrent threads (and VMs) used by the locking test. */
+#define LOCK_TESTING_THREADS 3
+
+/* Per-thread arguments for locking_test_thread(). */
+struct locking_thread_input {
+ /* The destination VM this thread migrates contexts into. */
+ struct kvm_vm *vm;
+ /* fds of all test VMs; each thread cycles through them as sources. */
+ int source_fds[LOCK_TESTING_THREADS];
+};
+
+/*
+ * Thread body for the deadlock test: repeatedly fire the migration ioctl
+ * at this thread's VM, rotating through all source VMs, while sibling
+ * threads do the same with overlapping source/destination sets.
+ */
+static void *locking_test_thread(void *arg)
+{
+	struct kvm_enable_cap cap = { 0 };
+	int i, j;
+	/* Cast to the correct type; 'struct locking_test_thread' does not exist. */
+	struct locking_thread_input *input = (struct locking_thread_input *)arg;
+
+	cap.cap = KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM;
+
+	for (i = 0; i < 1000; ++i) {
+		/*
+		 * Round-robin over the source VMs. The array index must be
+		 * i % LOCK_TESTING_THREADS; indexing source_fds[] with an fd
+		 * value pulled from the array (as the original did) reads
+		 * out of bounds.
+		 */
+		j = i % LOCK_TESTING_THREADS;
+		cap.args[0] = input->source_fds[j];
+		/*
+		 * Call IOCTL directly without checking return code. We are
+		 * simply trying to confirm there is no deadlock from userspace
+		 * not check correctness of migration here.
+		 */
+		ioctl(input->vm->fd, KVM_ENABLE_CAP, &cap);
+	}
+
+	/* pthread start routines must return a void *. */
+	return NULL;
+}
+
+/*
+ * Spawn LOCK_TESTING_THREADS threads that concurrently migrate SEV
+ * contexts between an overlapping set of VMs, to confirm the kernel's
+ * locking scheme cannot deadlock. Correctness of the migrations is not
+ * checked here.
+ */
+static void test_sev_migrate_locking(void)
+{
+	struct locking_thread_input input[LOCK_TESTING_THREADS];
+	pthread_t pt[LOCK_TESTING_THREADS];
+	int i;
+
+	/* One SEV VM per thread; gather all fds into input[0]'s table. */
+	for (i = 0; i < LOCK_TESTING_THREADS; ++i) {
+		input[i].vm = sev_vm_create(/* es= */ false);
+		input[0].source_fds[i] = input[i].vm->fd;
+	}
+	/*
+	 * Replicate the fd table to every thread. A loop — rather than
+	 * hardcoded memcpy()s for inputs 1 and 2 — stays correct if
+	 * LOCK_TESTING_THREADS is ever changed.
+	 */
+	for (i = 1; i < LOCK_TESTING_THREADS; ++i)
+		memcpy(input[i].source_fds, input[0].source_fds,
+		       sizeof(input[i].source_fds));
+
+	for (i = 0; i < LOCK_TESTING_THREADS; ++i)
+		pthread_create(&pt[i], NULL, locking_test_thread, &input[i]);
+
+	for (i = 0; i < LOCK_TESTING_THREADS; ++i)
+		pthread_join(pt[i], NULL);
+}
+
+int main(int argc, char *argv[])
+{
+ /* Exercise intra-host migration for plain SEV, then SEV-ES. */
+ test_sev_migrate_from(/* es= */ false);
+ test_sev_migrate_from(/* es= */ true);
+ /* Hammer the migration ioctl from several threads to check for deadlock. */
+ test_sev_migrate_locking();
+ return 0;
+}
--
2.33.0.259.gc128427fd7-goog
^ permalink raw reply related [flat|nested] 8+ messages in thread
end of thread, other threads:[~2021-09-01 14:54 UTC | newest]
Thread overview: 8+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2021-08-30 21:29 [PATCH 3/3 V6] selftest: KVM: Add intra host migration Peter Gonda
-- strict thread matches above, loose matches on Subject: below --
2021-08-30 21:29 Peter Gonda
2021-08-31 13:24 ` Marc Orr
2021-08-31 13:25 ` Marc Orr
2021-08-31 15:03 ` Peter Gonda
2021-08-31 15:20 ` Marc Orr
2021-08-30 20:57 [PATCH 0/2 V6] Add AMD SEV and SEV-ES intra host migration support Peter Gonda
2021-08-30 20:57 ` [PATCH 3/3 V6] selftest: KVM: Add intra host migration Peter Gonda
2021-09-01 14:53 ` kernel test robot
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox