From: Wei Wang <wei.w.wang@intel.com>
To: seanjc@google.com, pbonzini@redhat.com
Cc: dmatlack@google.com, vipinsh@google.com, ajones@ventanamicro.com,
eric.auger@redhat.com, kvm@vger.kernel.org,
linux-kernel@vger.kernel.org, Wei Wang <wei.w.wang@intel.com>
Subject: [PATCH v1 01/18] KVM: selftests/kvm_util: use array of pointers to maintain vcpus in kvm_vm
Date: Mon, 24 Oct 2022 19:34:28 +0800 [thread overview]
Message-ID: <20221024113445.1022147-2-wei.w.wang@intel.com> (raw)
In-Reply-To: <20221024113445.1022147-1-wei.w.wang@intel.com>
Each vcpu has an id associated with it, and referencing a vcpu by
indexing into an array with "vcpu->id" is intrinsically faster and
easier than traversing a list of vcpus, as the current implementation
does. Change the vcpu list to an array of vcpu pointers. Users then
don't need to allocate such a vcpu array on their own; instead, they
can reuse the one maintained in kvm_vm.
Signed-off-by: Wei Wang <wei.w.wang@intel.com>
---
.../testing/selftests/kvm/include/kvm_util.h | 4 +++
.../selftests/kvm/include/kvm_util_base.h | 3 +-
tools/testing/selftests/kvm/lib/kvm_util.c | 34 ++++++-------------
tools/testing/selftests/kvm/lib/x86_64/vmx.c | 2 +-
4 files changed, 17 insertions(+), 26 deletions(-)
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index c9286811a4cb..5d5c8968fb06 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -10,4 +10,8 @@
#include "kvm_util_base.h"
#include "ucall_common.h"
+#define vm_iterate_over_vcpus(vm, vcpu, i) \
+ for (i = 0, vcpu = vm->vcpus[0]; \
+ vcpu && i < KVM_MAX_VCPUS; vcpu = vm->vcpus[++i])
+
#endif /* SELFTEST_KVM_UTIL_H */
diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index e42a09cd24a0..c90a9609b853 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -45,7 +45,6 @@ struct userspace_mem_region {
};
struct kvm_vcpu {
- struct list_head list;
uint32_t id;
int fd;
struct kvm_vm *vm;
@@ -75,7 +74,6 @@ struct kvm_vm {
unsigned int pa_bits;
unsigned int va_bits;
uint64_t max_gfn;
- struct list_head vcpus;
struct userspace_mem_regions regions;
struct sparsebit *vpages_valid;
struct sparsebit *vpages_mapped;
@@ -92,6 +90,7 @@ struct kvm_vm {
int stats_fd;
struct kvm_stats_header stats_header;
struct kvm_stats_desc *stats_desc;
+ struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
};
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index f1cb1627161f..941f6c3ea9dc 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -195,7 +195,6 @@ struct kvm_vm *____vm_create(enum vm_guest_mode mode, uint64_t nr_pages)
vm = calloc(1, sizeof(*vm));
TEST_ASSERT(vm != NULL, "Insufficient Memory");
- INIT_LIST_HEAD(&vm->vcpus);
vm->regions.gpa_tree = RB_ROOT;
vm->regions.hva_tree = RB_ROOT;
hash_init(vm->regions.slot_hash);
@@ -534,6 +533,10 @@ __weak void vcpu_arch_free(struct kvm_vcpu *vcpu)
static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
int ret;
+ uint32_t vcpu_id = vcpu->id;
+
+ TEST_ASSERT(!!vm->vcpus[vcpu_id], "vCPU%d wasn't added\n", vcpu_id);
+ vm->vcpus[vcpu_id] = NULL;
if (vcpu->dirty_gfns) {
ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
@@ -547,18 +550,16 @@ static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
ret = close(vcpu->fd);
TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
- list_del(&vcpu->list);
-
vcpu_arch_free(vcpu);
free(vcpu);
}
void kvm_vm_release(struct kvm_vm *vmp)
{
- struct kvm_vcpu *vcpu, *tmp;
- int ret;
+ struct kvm_vcpu *vcpu;
+ int i, ret;
- list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list)
+ vm_iterate_over_vcpus(vmp, vcpu, i)
vm_vcpu_rm(vmp, vcpu);
ret = close(vmp->fd);
@@ -1085,18 +1086,6 @@ static int vcpu_mmap_sz(void)
return ret;
}
-static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id)
-{
- struct kvm_vcpu *vcpu;
-
- list_for_each_entry(vcpu, &vm->vcpus, list) {
- if (vcpu->id == vcpu_id)
- return true;
- }
-
- return false;
-}
-
/*
* Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id.
* No additional vCPU setup is done. Returns the vCPU.
@@ -1106,7 +1095,7 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
struct kvm_vcpu *vcpu;
/* Confirm a vcpu with the specified id doesn't already exist. */
- TEST_ASSERT(!vcpu_exists(vm, vcpu_id), "vCPU%d already exists\n", vcpu_id);
+ TEST_ASSERT(!vm->vcpus[vcpu_id], "vCPU%d already exists\n", vcpu_id);
/* Allocate and initialize new vcpu structure. */
vcpu = calloc(1, sizeof(*vcpu));
@@ -1125,8 +1114,7 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
TEST_ASSERT(vcpu->run != MAP_FAILED,
__KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
- /* Add to linked-list of VCPUs. */
- list_add(&vcpu->list, &vm->vcpus);
+ vm->vcpus[vcpu_id] = vcpu;
return vcpu;
}
@@ -1684,7 +1672,7 @@ void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
*/
void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
- int ctr;
+ int i, ctr;
struct userspace_mem_region *region;
struct kvm_vcpu *vcpu;
@@ -1712,7 +1700,7 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
}
fprintf(stream, "%*sVCPUs:\n", indent, "");
- list_for_each_entry(vcpu, &vm->vcpus, list)
+ vm_iterate_over_vcpus(vm, vcpu, i)
vcpu_dump(stream, vcpu, indent + 2);
}
diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index d21049c38fc5..77812dd03647 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -549,7 +549,7 @@ bool kvm_vm_has_ept(struct kvm_vm *vm)
struct kvm_vcpu *vcpu;
uint64_t ctrl;
- vcpu = list_first_entry(&vm->vcpus, struct kvm_vcpu, list);
+ vcpu = vm->vcpus[0];
TEST_ASSERT(vcpu, "Cannot determine EPT support without vCPUs.\n");
ctrl = vcpu_get_msr(vcpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS) >> 32;
--
2.27.0
next prev parent reply other threads:[~2022-10-24 11:38 UTC|newest]
Thread overview: 43+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-10-24 11:34 [PATCH v1 00/18] KVM selftests code consolidation and cleanup Wei Wang
2022-10-24 11:34 ` Wei Wang [this message]
2022-10-26 23:47 ` [PATCH v1 01/18] KVM: selftests/kvm_util: use array of pointers to maintain vcpus in kvm_vm Sean Christopherson
2022-10-27 12:28 ` Wang, Wei W
2022-10-27 15:27 ` Sean Christopherson
2022-10-28 2:13 ` Wang, Wei W
2022-10-24 11:34 ` [PATCH v1 02/18] KVM: selftests/kvm_util: use vm->vcpus[] when create vm with vcpus Wei Wang
2022-10-24 11:34 ` [PATCH v1 03/18] KVM: selftests/kvm_util: helper functions for vcpus and threads Wei Wang
2022-10-27 0:09 ` Sean Christopherson
2022-10-27 14:02 ` Wang, Wei W
2022-10-27 14:54 ` Sean Christopherson
2022-10-24 11:34 ` [PATCH v1 04/18] KVM: selftests/kvm_page_table_test: vcpu related code consolidation Wei Wang
2022-10-24 11:34 ` [PATCH v1 05/18] KVM: selftests/hardware_disable_test: code consolidation and cleanup Wei Wang
2022-10-27 0:16 ` Sean Christopherson
2022-10-27 14:14 ` Wang, Wei W
2022-10-27 18:03 ` Sean Christopherson
2022-10-28 2:16 ` Wang, Wei W
2022-10-24 11:34 ` [PATCH v1 06/18] KVM: selftests/dirty_log_test: vcpu related code consolidation Wei Wang
2022-10-24 11:34 ` [PATCH v1 07/18] KVM: selftests/max_guest_memory_test: " Wei Wang
2022-10-24 11:34 ` [PATCH v1 08/18] KVM: selftests/set_memory_region_test: " Wei Wang
2022-10-24 11:34 ` [PATCH v1 09/18] KVM: selftests/steal_time: vcpu related code consolidation and cleanup Wei Wang
2022-10-27 0:17 ` Sean Christopherson
2022-10-24 11:34 ` [PATCH v1 10/18] KVM: selftests/tsc_scaling_sync: vcpu related code consolidation Wei Wang
2022-10-24 11:34 ` [PATCH v1 11/18] KVM: selftest/xapic_ipi_test: " Wei Wang
2022-10-24 11:34 ` [PATCH v1 12/18] KVM: selftests/rseq_test: name the migration thread and some cleanup Wei Wang
2022-10-27 0:18 ` Sean Christopherson
2022-10-24 11:34 ` [PATCH v1 13/18] KVM: selftests/perf_test_util: vcpu related code consolidation Wei Wang
2022-10-24 11:34 ` [PATCH v1 14/18] KVM: selftest/memslot_perf_test: " Wei Wang
2022-10-24 11:34 ` [PATCH v1 15/18] KVM: selftests/vgic_init: " Wei Wang
2022-10-24 11:34 ` [PATCH v1 16/18] KVM: selftest/arch_timer: " Wei Wang
2022-10-24 11:34 ` [PATCH v1 17/18] KVM: selftests: remove the *vcpu[] input from __vm_create_with_vcpus Wei Wang
2022-10-24 11:34 ` [PATCH v1 18/18] KVM: selftests/kvm_create_max_vcpus: check KVM_MAX_VCPUS Wei Wang
2022-10-27 0:22 ` Sean Christopherson
2022-10-26 21:22 ` [PATCH v1 00/18] KVM selftests code consolidation and cleanup David Matlack
2022-10-27 12:18 ` Wang, Wei W
2022-10-27 15:44 ` Sean Christopherson
2022-10-27 16:24 ` David Matlack
2022-10-27 18:27 ` Sean Christopherson
2022-10-28 12:41 ` Andrew Jones
2022-10-28 15:49 ` Sean Christopherson
2022-11-07 18:11 ` David Matlack
2022-11-07 18:19 ` Sean Christopherson
2022-11-09 19:05 ` David Matlack
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20221024113445.1022147-2-wei.w.wang@intel.com \
--to=wei.w.wang@intel.com \
--cc=ajones@ventanamicro.com \
--cc=dmatlack@google.com \
--cc=eric.auger@redhat.com \
--cc=kvm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=pbonzini@redhat.com \
--cc=seanjc@google.com \
--cc=vipinsh@google.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox