From: Shaoqin Huang <shahuang@redhat.com>
To: qemu-devel@nongnu.org
Subject: Re: [PATCH V5 1/9] accel/kvm: Extract common KVM vCPU {creation, parking} code
Date: Mon, 16 Oct 2023 10:45:56 +0800
Message-ID: <5072f590-a3e1-893d-9722-0499b8bbfce8@redhat.com>
In-Reply-To: <20231011194355.15628-2-salil.mehta@huawei.com>
On 10/12/23 03:43, Salil Mehta via wrote:
> KVM vCPU creation is done once, during the initialization of the VM, when the
> QEMU thread is spawned. This is common to all architectures.
>
> Hot-unplug of a vCPU results in the destruction of the vCPU object in QOM, but
> the corresponding KVM vCPU object in the host KVM is not destroyed; instead,
> its representative KVM vCPU object/context in QEMU is parked.
>
> Refactor the common logic so that it can be reused by the vCPU hotplug code.
>
> Signed-off-by: Salil Mehta <salil.mehta@huawei.com>
> Reviewed-by: Gavin Shan <gshan@redhat.com>
> Tested-by: Vishnu Pajjuri <vishnu@os.amperecomputing.com>
Reviewed-by: Shaoqin Huang <shahuang@redhat.com>
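
For anyone following the hotplug series on top of this: a minimal, purely
illustrative sketch of how an architecture's hot-(un)plug path could use the
refactored helpers. The example_* functions below are hypothetical and not part
of this patch; kvm_create_vcpu() and kvm_park_vcpu() are the APIs it adds.

    /* hot-plug: reuse a parked KVM vCPU fd if one exists, else create one */
    static int example_hotplug_vcpu(CPUState *cpu, Error **errp)
    {
        int ret = kvm_create_vcpu(cpu);

        if (ret < 0) {
            error_setg_errno(errp, -ret, "failed to create/unpark KVM vCPU %lu",
                             kvm_arch_vcpu_id(cpu));
        }
        return ret;
    }

    /* hot-unplug: keep the KVM vCPU fd on the parked list for later reuse */
    static void example_unplug_vcpu(CPUState *cpu)
    {
        kvm_park_vcpu(cpu);
    }
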
> ---
> accel/kvm/kvm-all.c    | 64 ++++++++++++++++++++++++++++++++----------
> accel/kvm/trace-events |  4 +++
> include/sysemu/kvm.h   | 16 +++++++++++
> 3 files changed, 69 insertions(+), 15 deletions(-)
>
> diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
> index ff1578bb32..0dcaa15276 100644
> --- a/accel/kvm/kvm-all.c
> +++ b/accel/kvm/kvm-all.c
> @@ -137,6 +137,7 @@ static QemuMutex kml_slots_lock;
> #define kvm_slots_unlock() qemu_mutex_unlock(&kml_slots_lock)
>
> static void kvm_slot_init_dirty_bitmap(KVMSlot *mem);
> +static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id);
>
> static inline void kvm_resample_fd_remove(int gsi)
> {
> @@ -320,14 +321,53 @@ err:
> return ret;
> }
>
> +void kvm_park_vcpu(CPUState *cpu)
> +{
> +    struct KVMParkedVcpu *vcpu;
> +
> +    trace_kvm_park_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
> +
> +    vcpu = g_malloc0(sizeof(*vcpu));
> +    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
> +    vcpu->kvm_fd = cpu->kvm_fd;
> +    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
> +}
> +
> +int kvm_create_vcpu(CPUState *cpu)
> +{
> +    unsigned long vcpu_id = kvm_arch_vcpu_id(cpu);
> +    KVMState *s = kvm_state;
> +    int kvm_fd;
> +
> +    trace_kvm_create_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
> +
> +    /* check if the KVM vCPU already exists but is parked */
> +    kvm_fd = kvm_get_vcpu(s, vcpu_id);
> +    if (kvm_fd < 0) {
> +        /* vCPU not parked: create a new KVM vCPU */
> +        kvm_fd = kvm_vm_ioctl(s, KVM_CREATE_VCPU, vcpu_id);
> +        if (kvm_fd < 0) {
> +            error_report("KVM_CREATE_VCPU IOCTL failed for vCPU %lu", vcpu_id);
> +            return kvm_fd;
> +        }
> +    }
> +
> +    cpu->kvm_fd = kvm_fd;
> +    cpu->kvm_state = s;
> +    cpu->vcpu_dirty = true;
> +    cpu->dirty_pages = 0;
> +    cpu->throttle_us_per_full = 0;
> +
> +    return 0;
> +}
> +
> static int do_kvm_destroy_vcpu(CPUState *cpu)
> {
> KVMState *s = kvm_state;
> long mmap_size;
> - struct KVMParkedVcpu *vcpu = NULL;
> int ret = 0;
>
> - DPRINTF("kvm_destroy_vcpu\n");
> + trace_kvm_destroy_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
>
> ret = kvm_arch_destroy_vcpu(cpu);
> if (ret < 0) {
> @@ -353,10 +393,7 @@ static int do_kvm_destroy_vcpu(CPUState *cpu)
> }
> }
>
> -    vcpu = g_malloc0(sizeof(*vcpu));
> -    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
> -    vcpu->kvm_fd = cpu->kvm_fd;
> -    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
> +    kvm_park_vcpu(cpu);
> err:
> return ret;
> }
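
For reference, the bookkeeping both paths now funnel through is the existing
parked-vCPU structure in kvm-all.c, which looks roughly like this (quoted from
memory, comments mine):

    struct KVMParkedVcpu {
        unsigned long vcpu_id;           /* arch vCPU id, kvm_arch_vcpu_id() */
        int kvm_fd;                      /* preserved KVM vCPU fd */
        QLIST_ENTRY(KVMParkedVcpu) node; /* on kvm_state->kvm_parked_vcpus */
    };

kvm_park_vcpu() inserts into that list and kvm_get_vcpu() removes from it, so
the KVM fd survives the QOM vCPU teardown.
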
> @@ -377,6 +414,8 @@ static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
> if (cpu->vcpu_id == vcpu_id) {
> int kvm_fd;
>
> + trace_kvm_get_vcpu(vcpu_id);
> +
> QLIST_REMOVE(cpu, node);
> kvm_fd = cpu->kvm_fd;
> g_free(cpu);
> @@ -384,7 +423,7 @@ static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
> }
> }
>
> - return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
> + return -ENOENT;
> }
>
> int kvm_init_vcpu(CPUState *cpu, Error **errp)
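
With this change kvm_get_vcpu() above is a pure lookup: it either hands back a
parked fd or returns -ENOENT, and the caller decides what a miss means. The
expected pattern (as kvm_create_vcpu() does it) is roughly:

    kvm_fd = kvm_get_vcpu(s, vcpu_id);
    if (kvm_fd < 0) {
        /* nothing parked under this id, ask KVM for a fresh vCPU */
        kvm_fd = kvm_vm_ioctl(s, KVM_CREATE_VCPU, vcpu_id);
    }
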
> @@ -395,19 +434,14 @@ int kvm_init_vcpu(CPUState *cpu, Error **errp)
>
> trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
>
> - ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
> + ret = kvm_create_vcpu(cpu);
> if (ret < 0) {
> - error_setg_errno(errp, -ret, "kvm_init_vcpu: kvm_get_vcpu failed (%lu)",
> + error_setg_errno(errp, -ret,
> + "kvm_init_vcpu: kvm_create_vcpu failed (%lu)",
> kvm_arch_vcpu_id(cpu));
> goto err;
> }
>
> - cpu->kvm_fd = ret;
> - cpu->kvm_state = s;
> - cpu->vcpu_dirty = true;
> - cpu->dirty_pages = 0;
> - cpu->throttle_us_per_full = 0;
> -
> mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
> if (mmap_size < 0) {
> ret = mmap_size;
> diff --git a/accel/kvm/trace-events b/accel/kvm/trace-events
> index 399aaeb0ec..cdd0c95c09 100644
> --- a/accel/kvm/trace-events
> +++ b/accel/kvm/trace-events
> @@ -9,6 +9,10 @@ kvm_device_ioctl(int fd, int type, void *arg) "dev fd %d, type 0x%x, arg %p"
> kvm_failed_reg_get(uint64_t id, const char *msg) "Warning: Unable to retrieve ONEREG %" PRIu64 " from KVM: %s"
> kvm_failed_reg_set(uint64_t id, const char *msg) "Warning: Unable to set ONEREG %" PRIu64 " to KVM: %s"
> kvm_init_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
> +kvm_create_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
> +kvm_get_vcpu(unsigned long arch_cpu_id) "id: %lu"
> +kvm_destroy_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
> +kvm_park_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
> kvm_irqchip_commit_routes(void) ""
> kvm_irqchip_add_msi_route(char *name, int vector, int virq) "dev %s vector %d virq %d"
> kvm_irqchip_update_msi_route(int virq) "Updating MSI route virq=%d"
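
The new tracepoints make the create/park/unpark flow easy to observe. Assuming
tracing was built in (for instance with the default 'log' backend), something
along these lines should print them at run time; the command is illustrative
only:

    $ qemu-system-aarch64 -accel kvm -trace "kvm_*vcpu" ...

which matches kvm_init_vcpu, kvm_create_vcpu, kvm_get_vcpu, kvm_park_vcpu and
kvm_destroy_vcpu.
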
> diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
> index ee9025f8e9..740686ab60 100644
> --- a/include/sysemu/kvm.h
> +++ b/include/sysemu/kvm.h
> @@ -465,6 +465,22 @@ void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len);
> int kvm_physical_memory_addr_from_host(KVMState *s, void *ram_addr,
> hwaddr *phys_addr);
>
> +/**
> + * kvm_create_vcpu - Gets a parked KVM vCPU or creates a KVM vCPU
> + * @cpu: QOM CPUState object for which KVM vCPU has to be fetched/created.
> + *
> + * @returns: 0 when success, errno (<0) when failed.
> + */
> +int kvm_create_vcpu(CPUState *cpu);
> +
> +/**
> + * kvm_park_vcpu - Park Qemu KVM vCPU context
> + * @cpu: QOM CPUState object for which Qemu KVM vCPU context has to be parked.
> + *
> + * @returns: none
> + */
> +void kvm_park_vcpu(CPUState *cpu);
> +
> #endif /* NEED_CPU_H */
>
> void kvm_cpu_synchronize_state(CPUState *cpu);
--
Shaoqin