From: Vitaly Kuznetsov <vkuznets@redhat.com>
To: Maxim Levitsky <mlevitsk@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>,
Wanpeng Li <wanpengli@tencent.com>,
Jim Mattson <jmattson@google.com>,
Michael Kelley <mikelley@microsoft.com>,
Siddharth Chandrasekaran <sidcha@amazon.de>,
linux-hyperv@vger.kernel.org, linux-kernel@vger.kernel.org,
kvm@vger.kernel.org, Paolo Bonzini <pbonzini@redhat.com>
Subject: Re: [PATCH v3 26/34] KVM: selftests: Hyper-V PV TLB flush selftest
Date: Tue, 24 May 2022 16:51:16 +0200 [thread overview]
Message-ID: <87czg3rnkr.fsf@redhat.com> (raw)
In-Reply-To: <fd38937d4305bf606e0da687fc11b4866f575275.camel@redhat.com>
Maxim Levitsky <mlevitsk@redhat.com> writes:
> On Thu, 2022-04-14 at 15:20 +0200, Vitaly Kuznetsov wrote:
>> Introduce a selftest for Hyper-V PV TLB flush hypercalls
>> (HvFlushVirtualAddressSpace/HvFlushVirtualAddressSpaceEx,
>> HvFlushVirtualAddressList/HvFlushVirtualAddressListEx).
>>
>> The test creates one 'sender' vCPU and two 'worker' vCPUs which busy-loop
>> reading from a certain GVA and checking the observed value. The sender
>> vCPU drops to the host to swap the data page with another page filled
>> with a different value. The expectation for the workers is also
>> altered. Without a TLB flush on the worker vCPUs, they may continue to
>> observe the old value. To guard against accidental TLB flushes for the
>> worker vCPUs, the test is repeated 100 times.
>>
>> Hyper-V TLB flush hypercalls are tested in both 'normal' and 'XMM
>> fast' modes.
>>
>> Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
>> ---
>> tools/testing/selftests/kvm/.gitignore | 1 +
>> tools/testing/selftests/kvm/Makefile | 1 +
>> .../selftests/kvm/include/x86_64/hyperv.h | 1 +
>> .../selftests/kvm/x86_64/hyperv_tlb_flush.c | 647 ++++++++++++++++++
>> 4 files changed, 650 insertions(+)
>> create mode 100644 tools/testing/selftests/kvm/x86_64/hyperv_tlb_flush.c
>>
>> diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
>> index 5d5fbb161d56..1a1d09e414d5 100644
>> --- a/tools/testing/selftests/kvm/.gitignore
>> +++ b/tools/testing/selftests/kvm/.gitignore
>> @@ -25,6 +25,7 @@
>> /x86_64/hyperv_features
>> /x86_64/hyperv_ipi
>> /x86_64/hyperv_svm_test
>> +/x86_64/hyperv_tlb_flush
>> /x86_64/mmio_warning_test
>> /x86_64/mmu_role_test
>> /x86_64/platform_info_test
>> diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
>> index 44889f897fe7..8b83abc09a1a 100644
>> --- a/tools/testing/selftests/kvm/Makefile
>> +++ b/tools/testing/selftests/kvm/Makefile
>> @@ -54,6 +54,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
>> TEST_GEN_PROGS_x86_64 += x86_64/hyperv_features
>> TEST_GEN_PROGS_x86_64 += x86_64/hyperv_ipi
>> TEST_GEN_PROGS_x86_64 += x86_64/hyperv_svm_test
>> +TEST_GEN_PROGS_x86_64 += x86_64/hyperv_tlb_flush
>> TEST_GEN_PROGS_x86_64 += x86_64/kvm_clock_test
>> TEST_GEN_PROGS_x86_64 += x86_64/kvm_pv_test
>> TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test
>> diff --git a/tools/testing/selftests/kvm/include/x86_64/hyperv.h b/tools/testing/selftests/kvm/include/x86_64/hyperv.h
>> index f51d6fab8e93..1e34dd7c5075 100644
>> --- a/tools/testing/selftests/kvm/include/x86_64/hyperv.h
>> +++ b/tools/testing/selftests/kvm/include/x86_64/hyperv.h
>> @@ -185,6 +185,7 @@
>> /* hypercall options */
>> #define HV_HYPERCALL_FAST_BIT BIT(16)
>> #define HV_HYPERCALL_VARHEAD_OFFSET 17
>> +#define HV_HYPERCALL_REP_COMP_OFFSET 32
>>
>> #define HYPERV_LINUX_OS_ID ((u64)0x8100 << 48)
>>
>> diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_tlb_flush.c b/tools/testing/selftests/kvm/x86_64/hyperv_tlb_flush.c
>> new file mode 100644
>> index 000000000000..00bcae45ddd2
>> --- /dev/null
>> +++ b/tools/testing/selftests/kvm/x86_64/hyperv_tlb_flush.c
>> @@ -0,0 +1,647 @@
>> +// SPDX-License-Identifier: GPL-2.0
>> +/*
>> + * Hyper-V HvFlushVirtualAddress{List,Space}{,Ex} tests
>> + *
>> + * Copyright (C) 2022, Red Hat, Inc.
>> + *
>> + */
>> +
>> +#define _GNU_SOURCE /* for program_invocation_short_name */
>> +#include <pthread.h>
>> +#include <inttypes.h>
>> +
>> +#include "kvm_util.h"
>> +#include "hyperv.h"
>> +#include "processor.h"
>> +#include "test_util.h"
>> +#include "vmx.h"
>> +
>> +#define SENDER_VCPU_ID 1
>> +#define WORKER_VCPU_ID_1 2
>> +#define WORKER_VCPU_ID_2 65
>> +
>> +#define NTRY 100
>> +
>> +struct thread_params {
>> + struct kvm_vm *vm;
>> + uint32_t vcpu_id;
>> +};
>> +
>> +struct hv_vpset {
>> + u64 format;
>> + u64 valid_bank_mask;
>> + u64 bank_contents[];
>> +};
>> +
>> +enum HV_GENERIC_SET_FORMAT {
>> + HV_GENERIC_SET_SPARSE_4K,
>> + HV_GENERIC_SET_ALL,
>> +};
>> +
>> +#define HV_FLUSH_ALL_PROCESSORS BIT(0)
>> +#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1)
>> +#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2)
>> +#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3)
>> +
>> +/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */
>> +struct hv_tlb_flush {
>> + u64 address_space;
>> + u64 flags;
>> + u64 processor_mask;
>> + u64 gva_list[];
>> +} __packed;
>> +
>> +/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */
>> +struct hv_tlb_flush_ex {
>> + u64 address_space;
>> + u64 flags;
>> + struct hv_vpset hv_vp_set;
>> + u64 gva_list[];
>> +} __packed;
>> +
>> +static inline void hv_init(vm_vaddr_t pgs_gpa)
>> +{
>> + wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
>> + wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
>> +}
>> +
>> +static void worker_code(void *test_pages, vm_vaddr_t pgs_gpa)
>> +{
>> + u32 vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);
>> + unsigned char chr;
>> +
>> + x2apic_enable();
>> + hv_init(pgs_gpa);
>> +
>> + for (;;) {
>> + chr = READ_ONCE(*(unsigned char *)(test_pages + 4096 * 2 + vcpu_id));
> It would be nice to wrap this into a function, like set_expected_char, for ease
> of understanding the code.
>
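Something like the below would do it (just a sketch, the helper name is made
up and simply mirrors set_expected_char()):

static unsigned char read_expected_char(void *test_pages, u32 vcpu_id)
{
	/* The third test page holds the per-vCPU 'expected char' indication */
	return READ_ONCE(*(unsigned char *)(test_pages + 4096 * 2 + vcpu_id));
}
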
>> + if (chr)
>> + GUEST_ASSERT(*(unsigned char *)test_pages == chr);
>> + asm volatile("nop");
>> + }
>> +}
>> +
>> +static inline u64 hypercall(u64 control, vm_vaddr_t arg1, vm_vaddr_t arg2)
>> +{
>> + u64 hv_status;
>> +
>> + asm volatile("mov %3, %%r8\n"
>> + "vmcall"
>> + : "=a" (hv_status),
>> + "+c" (control), "+d" (arg1)
>> + : "r" (arg2)
>> + : "cc", "memory", "r8", "r9", "r10", "r11");
>> +
>> + return hv_status;
>> +}
>> +
>> +static inline void nop_loop(void)
>> +{
>> + int i;
>> +
>> + for (i = 0; i < 10000000; i++)
>> + asm volatile("nop");
>> +}
>> +
>> +static inline void sync_to_xmm(void *data)
>> +{
>> + int i;
>> +
>> + for (i = 0; i < 8; i++)
>> + write_sse_reg(i, (sse128_t *)(data + sizeof(sse128_t) * i));
>> +}
>
> Nitpick: I see duplicated code, I complain ;-) - maybe put the above into some common file?
>
Gone now.
>> +
>> +static void set_expected_char(void *addr, unsigned char chr, int vcpu_id)
>> +{
>> + asm volatile("mfence");
>
> I remember that Paolo once told me (I might not remember that correctly though)
> that on x86 the actual hardware barriers like mfence are not really
> needed, because the hardware already performs memory accesses in order,
> unless fancy (e.g. non-WB) memory types are used.
Even if it can be dropped, we still need a compiler barrier, so I prefer to
keep the explicit 'mfence'/'lfence'/... -- especially in tests where
performance doesn't matter much.
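(For completeness: the bare compiler barrier that would remain if the hardware
fence were dropped is just

	asm volatile("" ::: "memory");	/* no instruction emitted */

which only stops the compiler from reordering/eliding accesses, while 'mfence'
additionally orders the accesses on the CPU side.)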
>
>> + *(unsigned char *)(addr + 2 * 4096 + vcpu_id) = chr;
>> +}
>> +
>> +static void sender_guest_code(void *hcall_page, void *test_pages, vm_vaddr_t pgs_gpa)
>> +{
>> + struct hv_tlb_flush *flush = (struct hv_tlb_flush *)hcall_page;
>> + struct hv_tlb_flush_ex *flush_ex = (struct hv_tlb_flush_ex *)hcall_page;
>> + int stage = 1, i;
>> + u64 res;
>> +
>> + hv_init(pgs_gpa);
>> +
>> + /* "Slow" hypercalls */
>
> I hopefully understand it correctly, see my comments below,
> but it might be worthwhile to add something similar to my comments
> to the code, to make it easier for someone reading it to understand.
>
>> +
>> + /* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for WORKER_VCPU_ID_1 */
>> + for (i = 0; i < NTRY; i++) {
>> + memset(hcall_page, 0, 4096);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_2);
>
> Here we set the expected char to 0, meaning that the workers will not assert
> if there is a mismatch.
>
>> + GUEST_SYNC(stage++);
> Now there is a mismatch: the host has swapped the pages for us.
>
>> + flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
>> + flush->processor_mask = BIT(WORKER_VCPU_ID_1);
>> + res = hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE, pgs_gpa, pgs_gpa + 4096);
>> + GUEST_ASSERT((res & 0xffff) == 0);
>
> Now that we've flushed the TLB, the guest should see the correct value.
>
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_1);
>
> Now we force the workers to check it.
>
> Btw, an idea: it might be nice to use more than two test pages --
> say, 100 test pages, each filled with a different value;
> memory is cheap, and this way there would be no way for something
> to cause a 'double error' which could hide a bug by chance.
>
>
> Another thing, it might be nice to wrap this into a macro/function
> to avoid *that* much duplication.
In the next version I still keep two pages and two workers for
simplicity, but I wrap all these pre- and post- guts into wrapper
functions.
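Something along these lines (a sketch only, names and signatures are not
final):

static void prepare_to_test(void *hcall_page, void *test_pages, int *stage)
{
	/* Reset the hypercall input page and drop the workers' expectation */
	memset(hcall_page, 0, 4096);
	set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_1);
	set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_2);
	/* Let the test pages get swapped */
	GUEST_SYNC((*stage)++);
}

static void post_test(void *test_pages, int i, bool flush_vcpu1, bool flush_vcpu2)
{
	/* Re-arm the expectation only for the vCPUs which were flushed */
	if (flush_vcpu1)
		set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_1);
	if (flush_vcpu2)
		set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_2);
	nop_loop();
}
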
>
>
>> + nop_loop();
>> + }
>> +
>> + /* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for WORKER_VCPU_ID_1 */
>> + for (i = 0; i < NTRY; i++) {
>> + memset(hcall_page, 0, 4096);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_2);
>> + GUEST_SYNC(stage++);
>> + flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
>> + flush->processor_mask = BIT(WORKER_VCPU_ID_1);
>> + flush->gva_list[0] = (u64)test_pages;
>> + res = hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST |
>> + (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
>> + pgs_gpa, pgs_gpa + 4096);
>> + GUEST_ASSERT((res & 0xffff) == 0);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_1);
>> + nop_loop();
>> + }
>> +
>> + /* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for HV_FLUSH_ALL_PROCESSORS */
>> + for (i = 0; i < NTRY; i++) {
>> + memset(hcall_page, 0, 4096);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_2);
>> + GUEST_SYNC(stage++);
>> + flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES | HV_FLUSH_ALL_PROCESSORS;
>> + flush->processor_mask = 0;
>> + res = hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE, pgs_gpa, pgs_gpa + 4096);
>> + GUEST_ASSERT((res & 0xffff) == 0);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_2);
>> + nop_loop();
>> + }
>> +
>> + /* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for HV_FLUSH_ALL_PROCESSORS */
>> + for (i = 0; i < NTRY; i++) {
>> + memset(hcall_page, 0, 4096);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_2);
>> + GUEST_SYNC(stage++);
>> + flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES | HV_FLUSH_ALL_PROCESSORS;
>> + flush->gva_list[0] = (u64)test_pages;
>> + res = hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST |
>> + (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
>> + pgs_gpa, pgs_gpa + 4096);
>> + GUEST_ASSERT((res & 0xffff) == 0);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_2);
>> + nop_loop();
>> + }
>> +
>> + /* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for WORKER_VCPU_ID_2 */
>> + for (i = 0; i < NTRY; i++) {
>> + memset(hcall_page, 0, 4096);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_2);
>> + GUEST_SYNC(stage++);
>> + flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
>> + flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
>> + flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
>> + flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
>> + res = hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
>> + (1 << HV_HYPERCALL_VARHEAD_OFFSET),
>> + pgs_gpa, pgs_gpa + 4096);
>> + GUEST_ASSERT((res & 0xffff) == 0);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_2);
>> + nop_loop();
>> + }
>> +
>> + /* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for WORKER_VCPU_ID_2 */
>> + for (i = 0; i < NTRY; i++) {
>> + memset(hcall_page, 0, 4096);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_2);
>> + GUEST_SYNC(stage++);
>> + flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
>> + flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
>> + flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
>> + flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
>> + /* bank_contents and gva_list occupy the same space, thus [1] */
>> + flush_ex->gva_list[1] = (u64)test_pages;
>> + res = hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
>> + (1 << HV_HYPERCALL_VARHEAD_OFFSET) |
>> + (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
>> + pgs_gpa, pgs_gpa + 4096);
>> + GUEST_ASSERT((res & 0xffff) == 0);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_2);
>> + nop_loop();
>> + }
>> +
>> + /* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for both vCPUs */
>> + for (i = 0; i < NTRY; i++) {
>> + memset(hcall_page, 0, 4096);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_2);
>> + GUEST_SYNC(stage++);
>> + flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
>> + flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
>> + flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64) |
>> + BIT_ULL(WORKER_VCPU_ID_1 / 64);
>> + flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
>> + flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
>> + res = hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
>> + (2 << HV_HYPERCALL_VARHEAD_OFFSET),
>> + pgs_gpa, pgs_gpa + 4096);
>> + GUEST_ASSERT((res & 0xffff) == 0);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_2);
>> + nop_loop();
>> + }
>> +
>> + /* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for both vCPUs */
>> + for (i = 0; i < NTRY; i++) {
>> + memset(hcall_page, 0, 4096);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_2);
>> + GUEST_SYNC(stage++);
>> + flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
>> + flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
>> + flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_1 / 64) |
>> + BIT_ULL(WORKER_VCPU_ID_2 / 64);
>> + flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
>> + flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
>> + /* bank_contents and gva_list occupy the same space, thus [2] */
>> + flush_ex->gva_list[2] = (u64)test_pages;
>> + res = hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
>> + (2 << HV_HYPERCALL_VARHEAD_OFFSET) |
>> + (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
>> + pgs_gpa, pgs_gpa + 4096);
>> + GUEST_ASSERT((res & 0xffff) == 0);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_2);
>> + nop_loop();
>> + }
>> +
>> + /* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for HV_GENERIC_SET_ALL */
>> + for (i = 0; i < NTRY; i++) {
>> + memset(hcall_page, 0, 4096);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_2);
>> + GUEST_SYNC(stage++);
>> + flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
>> + flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
>> + res = hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
>> + pgs_gpa, pgs_gpa + 4096);
>> + GUEST_ASSERT((res & 0xffff) == 0);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_2);
>> + nop_loop();
>> + }
>> +
>> + /* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for HV_GENERIC_SET_ALL */
>> + for (i = 0; i < NTRY; i++) {
>> + memset(hcall_page, 0, 4096);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_2);
>> + GUEST_SYNC(stage++);
>> + flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
>> + flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
>> + flush_ex->gva_list[0] = (u64)test_pages;
>> + res = hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
>> + (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
>> + pgs_gpa, pgs_gpa + 4096);
>> + GUEST_ASSERT((res & 0xffff) == 0);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_2);
>> + nop_loop();
>> + }
>> +
>> + /* "Fast" hypercalls */
>> +
>> + /* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for WORKER_VCPU_ID_1 */
>> + for (i = 0; i < NTRY; i++) {
>> + memset(hcall_page, 0, 4096);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_2);
>> + GUEST_SYNC(stage++);
>> + flush->processor_mask = BIT(WORKER_VCPU_ID_1);
>> + sync_to_xmm(&flush->processor_mask);
>> + res = hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE |
>> + HV_HYPERCALL_FAST_BIT, 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
>> + GUEST_ASSERT((res & 0xffff) == 0);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_1);
>> + nop_loop();
>> + }
>> +
>> + /* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for WORKER_VCPU_ID_1 */
>> + for (i = 0; i < NTRY; i++) {
>> + memset(hcall_page, 0, 4096);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_2);
>> + GUEST_SYNC(stage++);
>> + flush->processor_mask = BIT(WORKER_VCPU_ID_1);
>> + flush->gva_list[0] = (u64)test_pages;
>> + sync_to_xmm(&flush->processor_mask);
>> + res = hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST | HV_HYPERCALL_FAST_BIT |
>> + (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
>> + 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
>> + GUEST_ASSERT((res & 0xffff) == 0);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_1);
>> + nop_loop();
>> + }
>> +
>> + /* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for HV_FLUSH_ALL_PROCESSORS */
>> + for (i = 0; i < NTRY; i++) {
>> + memset(hcall_page, 0, 4096);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_2);
>> + GUEST_SYNC(stage++);
>> + sync_to_xmm(&flush->processor_mask);
>> + res = hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT, 0x0,
>> + HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES | HV_FLUSH_ALL_PROCESSORS);
>> + GUEST_ASSERT((res & 0xffff) == 0);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_2);
>> + nop_loop();
>> + }
>> +
>> + /* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for HV_FLUSH_ALL_PROCESSORS */
>> + for (i = 0; i < NTRY; i++) {
>> + memset(hcall_page, 0, 4096);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_2);
>> + GUEST_SYNC(stage++);
>> + flush->gva_list[0] = (u64)test_pages;
>> + sync_to_xmm(&flush->processor_mask);
>> + res = hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST | HV_HYPERCALL_FAST_BIT |
>> + (1UL << HV_HYPERCALL_REP_COMP_OFFSET), 0x0,
>> + HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES | HV_FLUSH_ALL_PROCESSORS);
>> + GUEST_ASSERT((res & 0xffff) == 0);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_2);
>> + nop_loop();
>> + }
>> +
>> + /* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for WORKER_VCPU_ID_2 */
>> + for (i = 0; i < NTRY; i++) {
>> + memset(hcall_page, 0, 4096);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_2);
>> + GUEST_SYNC(stage++);
>> + flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
>> + flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
>> + flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
>> + sync_to_xmm(&flush_ex->hv_vp_set);
>> + res = hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX | HV_HYPERCALL_FAST_BIT |
>> + (1 << HV_HYPERCALL_VARHEAD_OFFSET),
>> + 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
>> + GUEST_ASSERT((res & 0xffff) == 0);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_2);
>> + nop_loop();
>> + }
>> +
>> + /* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for WORKER_VCPU_ID_2 */
>> + for (i = 0; i < NTRY; i++) {
>> + memset(hcall_page, 0, 4096);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_2);
>> + GUEST_SYNC(stage++);
>> + flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
>> + flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
>> + flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
>> + /* bank_contents and gva_list occupy the same space, thus [1] */
>> + flush_ex->gva_list[1] = (u64)test_pages;
>> + sync_to_xmm(&flush_ex->hv_vp_set);
>> + res = hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX | HV_HYPERCALL_FAST_BIT |
>> + (1 << HV_HYPERCALL_VARHEAD_OFFSET) |
>> + (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
>> + 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
>> + GUEST_ASSERT((res & 0xffff) == 0);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_2);
>> + nop_loop();
>> + }
>> +
>> + /* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for both vCPUs */
>> + for (i = 0; i < NTRY; i++) {
>> + memset(hcall_page, 0, 4096);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_2);
>> + GUEST_SYNC(stage++);
>> + flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
>> + flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64) |
>> + BIT_ULL(WORKER_VCPU_ID_1 / 64);
>> + flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
>> + flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
>> + sync_to_xmm(&flush_ex->hv_vp_set);
>> + res = hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX | HV_HYPERCALL_FAST_BIT |
>> + (2 << HV_HYPERCALL_VARHEAD_OFFSET),
>> + 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
>> + GUEST_ASSERT((res & 0xffff) == 0);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_2);
>> + nop_loop();
>> + }
>> +
>> + /* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for both vCPUs */
>> + for (i = 0; i < NTRY; i++) {
>> + memset(hcall_page, 0, 4096);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_2);
>> + GUEST_SYNC(stage++);
>> + flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
>> + flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_1 / 64) |
>> + BIT_ULL(WORKER_VCPU_ID_2 / 64);
>> + flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
>> + flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
>> + /* bank_contents and gva_list occupy the same space, thus [2] */
>> + flush_ex->gva_list[2] = (u64)test_pages;
>> + sync_to_xmm(&flush_ex->hv_vp_set);
>> + res = hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX | HV_HYPERCALL_FAST_BIT |
>> + (2 << HV_HYPERCALL_VARHEAD_OFFSET) |
>> + (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
>> + 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
>> + GUEST_ASSERT((res & 0xffff) == 0);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_2);
>> + nop_loop();
>> + }
>> +
>> + /* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for HV_GENERIC_SET_ALL */
>> + for (i = 0; i < NTRY; i++) {
>> + memset(hcall_page, 0, 4096);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_2);
>> + GUEST_SYNC(stage++);
>> + flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
>> + flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
>> + sync_to_xmm(&flush_ex->hv_vp_set);
>> + res = hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX | HV_HYPERCALL_FAST_BIT,
>> + 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
>> + GUEST_ASSERT((res & 0xffff) == 0);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_2);
>> + nop_loop();
>> + }
>> +
>> + /* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for HV_GENERIC_SET_ALL */
>> + for (i = 0; i < NTRY; i++) {
>> + memset(hcall_page, 0, 4096);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, 0x0, WORKER_VCPU_ID_2);
>> + GUEST_SYNC(stage++);
>> + flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
>> + flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
>> + flush_ex->gva_list[0] = (u64)test_pages;
>> + sync_to_xmm(&flush_ex->hv_vp_set);
>> + res = hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX | HV_HYPERCALL_FAST_BIT |
>> + (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
>> + 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
>> + GUEST_ASSERT((res & 0xffff) == 0);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_1);
>> + set_expected_char(test_pages, i % 2 ? 0x1 : 0x2, WORKER_VCPU_ID_2);
>> + nop_loop();
>> + }
>> +
>> + GUEST_DONE();
>> +}
>> +
>> +static void *vcpu_thread(void *arg)
>> +{
>> + struct thread_params *params = (struct thread_params *)arg;
>> + struct ucall uc;
>> + int old;
>> + int r;
>> + unsigned int exit_reason;
>> +
>> + r = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
>> + TEST_ASSERT(r == 0,
>> + "pthread_setcanceltype failed on vcpu_id=%u with errno=%d",
>> + params->vcpu_id, r);
>> +
>> + vcpu_run(params->vm, params->vcpu_id);
>> + exit_reason = vcpu_state(params->vm, params->vcpu_id)->exit_reason;
>> +
>> + TEST_ASSERT(exit_reason == KVM_EXIT_IO,
>> + "vCPU %u exited with unexpected exit reason %u-%s, expected KVM_EXIT_IO",
>> + params->vcpu_id, exit_reason, exit_reason_str(exit_reason));
>> +
>> + if (get_ucall(params->vm, params->vcpu_id, &uc) == UCALL_ABORT) {
>> + TEST_ASSERT(false,
>> + "vCPU %u exited with error: %s.\n",
>> + params->vcpu_id, (const char *)uc.args[0]);
>> + }
>> +
>> + return NULL;
>> +}
>> +
>> +static void cancel_join_vcpu_thread(pthread_t thread, uint32_t vcpu_id)
>> +{
>> + void *retval;
>> + int r;
>> +
>> + r = pthread_cancel(thread);
>> + TEST_ASSERT(r == 0,
>> + "pthread_cancel on vcpu_id=%d failed with errno=%d",
>> + vcpu_id, r);
>> +
>> + r = pthread_join(thread, &retval);
>> + TEST_ASSERT(r == 0,
>> + "pthread_join on vcpu_id=%d failed with errno=%d",
>> + vcpu_id, r);
>> + TEST_ASSERT(retval == PTHREAD_CANCELED,
>> + "expected retval=%p, got %p", PTHREAD_CANCELED,
>> + retval);
>> +}
>> +
>> +int main(int argc, char *argv[])
>> +{
>> + int r;
>> + pthread_t threads[2];
>> + struct thread_params params[2];
>> + struct kvm_vm *vm;
>> + struct kvm_run *run;
>> + vm_vaddr_t hcall_page, test_pages;
>> + struct ucall uc;
>> + int stage = 1;
>> +
>> + vm = vm_create_default(SENDER_VCPU_ID, 0, sender_guest_code);
>> + params[0].vm = vm;
>> + params[1].vm = vm;
>> +
>> + /* Hypercall input/output */
>> + hcall_page = vm_vaddr_alloc_pages(vm, 2);
>> + memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());
>> +
>> + /*
>> + * Test pages: the first one is filled with '0x1's, the second with '0x2's
>> + * and the test will swap their mappings. The third page keeps the indication
>> + * about the current state of mappings.
>> + */
>> + test_pages = vm_vaddr_alloc_pages(vm, 3);
>> + memset(addr_gva2hva(vm, test_pages), 0x1, 4096);
>> + memset(addr_gva2hva(vm, test_pages) + 4096, 0x2, 4096);
>> + set_expected_char(addr_gva2hva(vm, test_pages), 0x0, WORKER_VCPU_ID_1);
>> + set_expected_char(addr_gva2hva(vm, test_pages), 0x0, WORKER_VCPU_ID_2);
>> +
>> + vm_vcpu_add_default(vm, WORKER_VCPU_ID_1, worker_code);
>> + vcpu_args_set(vm, WORKER_VCPU_ID_1, 2, test_pages, addr_gva2gpa(vm, hcall_page));
>> + vcpu_set_msr(vm, WORKER_VCPU_ID_1, HV_X64_MSR_VP_INDEX, WORKER_VCPU_ID_1);
>> + vcpu_set_hv_cpuid(vm, WORKER_VCPU_ID_1);
>> +
>> + vm_vcpu_add_default(vm, WORKER_VCPU_ID_2, worker_code);
>> + vcpu_args_set(vm, WORKER_VCPU_ID_2, 2, test_pages, addr_gva2gpa(vm, hcall_page));
>> + vcpu_set_msr(vm, WORKER_VCPU_ID_2, HV_X64_MSR_VP_INDEX, WORKER_VCPU_ID_2);
>> + vcpu_set_hv_cpuid(vm, WORKER_VCPU_ID_2);
>> +
>> + vcpu_args_set(vm, SENDER_VCPU_ID, 3, hcall_page, test_pages,
>> + addr_gva2gpa(vm, hcall_page));
>
> It seems that all worker vCPUs get a pointer to the hypercall page,
> which they don't need and which, if used, would create a race.
>
Dropped (actually, I've created a new 'test_data' structure which is
shared by workers and sender).
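Roughly (a sketch, the exact field list is still in flux):

struct test_data {
	vm_vaddr_t hcall_gva;	/* sender only: where to build hypercall input */
	vm_paddr_t hcall_gpa;	/* needed by everyone for HV_X64_MSR_HYPERCALL */
	vm_vaddr_t test_pages;
};

so each vCPU only touches the fields it actually needs.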
>
>> + vcpu_set_hv_cpuid(vm, SENDER_VCPU_ID);
>> +
>> + params[0].vcpu_id = WORKER_VCPU_ID_1;
>> + r = pthread_create(&threads[0], NULL, vcpu_thread, &params[0]);
>> + TEST_ASSERT(r == 0,
>> + "pthread_create halter failed errno=%d", errno);
>> +
>> + params[1].vcpu_id = WORKER_VCPU_ID_2;
>> + r = pthread_create(&threads[1], NULL, vcpu_thread, &params[1]);
>> + TEST_ASSERT(r == 0,
>> + "pthread_create halter failed errno=%d", errno);
>
> Also, the worker threads here don't halt; the message was not updated, I think.
>
Fixed!
>
>> +
>> + run = vcpu_state(vm, SENDER_VCPU_ID);
>> +
>> + while (true) {
>> + r = _vcpu_run(vm, SENDER_VCPU_ID);
>> + TEST_ASSERT(!r, "vcpu_run failed: %d\n", r);
>> + TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
>> + "unexpected exit reason: %u (%s)",
>> + run->exit_reason, exit_reason_str(run->exit_reason));
>> +
>> + switch (get_ucall(vm, SENDER_VCPU_ID, &uc)) {
>> + case UCALL_SYNC:
>> + TEST_ASSERT(uc.args[1] == stage,
>> + "Unexpected stage: %ld (%d expected)\n",
>> + uc.args[1], stage);
>> + break;
>> + case UCALL_ABORT:
>> + TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
>> + __FILE__, uc.args[1]);
>> + return 1;
>> + case UCALL_DONE:
>> + return 0;
>> + }
>> +
>> + /* Swap test pages */
>> + if (stage % 2) {
>> + __virt_pg_map(vm, test_pages, addr_gva2gpa(vm, test_pages) + 4096,
>> + X86_PAGE_SIZE_4K, true);
>> + __virt_pg_map(vm, test_pages + 4096, addr_gva2gpa(vm, test_pages) - 4096,
>> + X86_PAGE_SIZE_4K, true);
>> + } else {
>> + __virt_pg_map(vm, test_pages, addr_gva2gpa(vm, test_pages) - 4096,
>> + X86_PAGE_SIZE_4K, true);
>> + __virt_pg_map(vm, test_pages + 4096, addr_gva2gpa(vm, test_pages) + 4096,
>> + X86_PAGE_SIZE_4K, true);
>> + }
>
> Another question: why is the host doing the swapping of the pages? Since
> !EPT/!NPT is not the goal of this test,
>
> why not let the guest vCPU (the sender) do the swapping? That would eliminate the VM exits
> to the host (which can even interfere with the TLB flush) and make the test
> closer to real-world usage.
This is actually a good idea. It required some APIs to be exported and
some trickery so the guest can actually reach its PTEs, but I think it's
worth it, so the next version will be doing all the updates from the guest
itself.
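The rough idea (a sketch only; the helper for exporting the PTEs' locations to
the guest doesn't exist yet and all names below are made up):

/*
 * Host side: export the guest virtual addresses of the PTEs mapping the two
 * test pages (e.g. as new fields in the 'test_data' structure mentioned
 * above); 'virt_get_pte_gva()' is a hypothetical helper.
 */
data->test_pages_pte[0] = virt_get_pte_gva(vm, data->test_pages);
data->test_pages_pte[1] = virt_get_pte_gva(vm, data->test_pages + 4096);

/*
 * Guest (sender) side: swap the two mappings by exchanging the PTEs
 * directly, no exit to the host needed.
 */
static void guest_swap_test_pages(u64 *pte1, u64 *pte2)
{
	u64 tmp = *pte1;

	*pte1 = *pte2;
	*pte2 = tmp;
	/* Workers keep seeing stale translations until the flush hypercall */
}
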
>
>
>> +
>> + stage++;
>> + }
>> +
>> + cancel_join_vcpu_thread(threads[0], WORKER_VCPU_ID_1);
>> + cancel_join_vcpu_thread(threads[1], WORKER_VCPU_ID_2);
>> + kvm_vm_free(vm);
>> +
>> + return 0;
>> +}
>
>
--
Vitaly