From: David Woodhouse <dwmw2@infradead.org>
To: qemu-devel@nongnu.org
Cc: "Paolo Bonzini" <pbonzini@redhat.com>,
"Paul Durrant" <paul@xen.org>,
"Joao Martins" <joao.m.martins@oracle.com>,
"Ankur Arora" <ankur.a.arora@oracle.com>,
"Philippe Mathieu-Daudé" <philmd@linaro.org>,
"Thomas Huth" <thuth@redhat.com>,
"Alex Bennée" <alex.bennee@linaro.org>,
"Juan Quintela" <quintela@redhat.com>,
"Dr . David Alan Gilbert" <dgilbert@redhat.com>,
"Claudio Fontana" <cfontana@suse.de>
Subject: [RFC PATCH v3 18/38] i386/xen: handle VCPUOP_register_vcpu_info
Date: Fri, 16 Dec 2022 00:40:57 +0000 [thread overview]
Message-ID: <20221216004117.862106-19-dwmw2@infradead.org> (raw)
In-Reply-To: <20221216004117.862106-1-dwmw2@infradead.org>
From: Joao Martins <joao.m.martins@oracle.com>
Handle the hypercall to set a per-vCPU vcpu_info, and also wire up the
default vcpu_info in the shared_info page for the first 32 vCPUs.
To avoid deadlock within KVM a vCPU thread must set its *own* vcpu_info
rather than it being set from the context in which the hypercall is
invoked.
Add the vcpu_info (and default) GPA to the vmstate_x86_cpu for migration,
and restore it in kvm_arch_put_registers() appropriately.
Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
---
target/i386/cpu.h | 2 +
target/i386/kvm/kvm.c | 19 +++++++++
target/i386/kvm/trace-events | 1 +
target/i386/kvm/xen-emu.c | 78 ++++++++++++++++++++++++++++++++++--
target/i386/kvm/xen-emu.h | 1 +
target/i386/machine.c | 21 ++++++++++
6 files changed, 119 insertions(+), 3 deletions(-)
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index c6c57baed5..109b2e5669 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1788,6 +1788,8 @@ typedef struct CPUArchState {
#endif
#if defined(CONFIG_KVM)
struct kvm_nested_state *nested_state;
+ uint64_t xen_vcpu_info_gpa;
+ uint64_t xen_vcpu_info_default_gpa;
#endif
#if defined(CONFIG_HVF)
HVFX86LazyFlags hvf_lflags;
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index c37e44d88f..8affe1eeae 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -1802,6 +1802,9 @@ int kvm_arch_init_vcpu(CPUState *cs)
has_msr_hv_hypercall = true;
}
+ env->xen_vcpu_info_gpa = UINT64_MAX;
+ env->xen_vcpu_info_default_gpa = UINT64_MAX;
+
if (cs->kvm_state->xen_version) {
#ifdef CONFIG_XEN_EMU
struct kvm_cpuid_entry2 *xen_max_leaf;
@@ -4723,6 +4726,22 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
kvm_arch_set_tsc_khz(cpu);
}
+#ifdef CONFIG_XEN_EMU
+ if (level == KVM_PUT_FULL_STATE) {
+ uint64_t gpa = x86_cpu->env.xen_vcpu_info_gpa;
+ if (gpa == UINT64_MAX) {
+ gpa = x86_cpu->env.xen_vcpu_info_default_gpa;
+ }
+
+ if (gpa != UINT64_MAX) {
+ ret = kvm_xen_set_vcpu_attr(cpu, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, gpa);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+ }
+#endif
+
ret = kvm_getput_regs(x86_cpu, 1);
if (ret < 0) {
return ret;
diff --git a/target/i386/kvm/trace-events b/target/i386/kvm/trace-events
index 0a47c26e80..14e54dfca5 100644
--- a/target/i386/kvm/trace-events
+++ b/target/i386/kvm/trace-events
@@ -9,3 +9,4 @@ kvm_x86_update_msi_routes(int num) "Updated %d MSI routes"
# xen-emu.c
kvm_xen_hypercall(int cpu, uint8_t cpl, uint64_t input, uint64_t a0, uint64_t a1, uint64_t a2, uint64_t ret) "xen_hypercall: cpu %d cpl %d input %" PRIu64 " a0 0x%" PRIx64 " a1 0x%" PRIx64 " a2 0x%" PRIx64" ret 0x%" PRIx64
kvm_xen_set_shared_info(uint64_t gfn) "shared info at gfn 0x%" PRIx64
+kvm_xen_set_vcpu_attr(int cpu, int type, uint64_t gpa) "vcpu attr cpu %d type %d gpa 0x%" PRIx64
diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c
index 83d98cbfd9..25c48248ce 100644
--- a/target/i386/kvm/xen-emu.c
+++ b/target/i386/kvm/xen-emu.c
@@ -147,10 +147,47 @@ static bool kvm_xen_hcall_xen_version(struct kvm_xen_exit *exit, X86CPU *cpu,
return true;
}
+int kvm_xen_set_vcpu_attr(CPUState *cs, uint16_t type, uint64_t gpa)
+{
+ struct kvm_xen_vcpu_attr xhsi;
+
+ xhsi.type = type;
+ xhsi.u.gpa = gpa;
+
+ trace_kvm_xen_set_vcpu_attr(cs->cpu_index, type, gpa);
+
+ return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &xhsi);
+}
+
+static void do_set_vcpu_info_default_gpa(CPUState *cs, run_on_cpu_data data)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ env->xen_vcpu_info_default_gpa = data.host_ulong;
+
+ /* Changing the default does nothing if a vcpu_info was explicitly set. */
+ if (env->xen_vcpu_info_gpa == UINT64_MAX) {
+ kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
+ env->xen_vcpu_info_default_gpa);
+ }
+}
+
+static void do_set_vcpu_info_gpa(CPUState *cs, run_on_cpu_data data)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ env->xen_vcpu_info_gpa = data.host_ulong;
+
+ kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
+ env->xen_vcpu_info_gpa);
+}
+
static int xen_set_shared_info(uint64_t gfn)
{
uint64_t gpa = gfn << TARGET_PAGE_BITS;
- int err;
+ int i, err;
/* The xen_overlay device tells KVM about it too, since it had to
* do that on migration load anyway (unless we're going to jump
@@ -162,6 +199,14 @@ static int xen_set_shared_info(uint64_t gfn)
trace_kvm_xen_set_shared_info(gfn);
+ for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++) {
+ CPUState *cpu = qemu_get_cpu(i);
+ if (cpu) {
+ async_run_on_cpu(cpu, do_set_vcpu_info_default_gpa, RUN_ON_CPU_HOST_ULONG(gpa));
+ }
+ gpa += sizeof(vcpu_info_t);
+ }
+
return err;
}
@@ -318,15 +363,42 @@ static bool kvm_xen_hcall_hvm_op(struct kvm_xen_exit *exit, X86CPU *cpu,
}
}
+static int vcpuop_register_vcpu_info(CPUState *cs, CPUState *target,
+ uint64_t arg)
+{
+ struct vcpu_register_vcpu_info rvi;
+ uint64_t gpa;
+
+ /* No need for 32/64 compat handling */
+ qemu_build_assert(sizeof(rvi) == 16);
+ qemu_build_assert(sizeof(struct vcpu_info) == 64);
+
+ if (!target)
+ return -ENOENT;
+
+ if (kvm_copy_from_gva(cs, arg, &rvi, sizeof(rvi))) {
+ return -EFAULT;
+ }
+
+ if (rvi.offset > TARGET_PAGE_SIZE - sizeof(struct vcpu_info)) {
+ return -EINVAL;
+ }
+
+ gpa = ((rvi.mfn << TARGET_PAGE_BITS) + rvi.offset);
+ async_run_on_cpu(target, do_set_vcpu_info_gpa, RUN_ON_CPU_HOST_ULONG(gpa));
+ return 0;
+}
+
static bool kvm_xen_hcall_vcpu_op(struct kvm_xen_exit *exit, X86CPU *cpu,
int cmd, int vcpu_id, uint64_t arg)
{
+ CPUState *dest = qemu_get_cpu(vcpu_id);
+ CPUState *cs = CPU(cpu);
int err;
switch (cmd) {
case VCPUOP_register_vcpu_info:
- /* no vcpu info placement for now */
- err = -ENOSYS;
+ err = vcpuop_register_vcpu_info(cs, dest, arg);
break;
default:
diff --git a/target/i386/kvm/xen-emu.h b/target/i386/kvm/xen-emu.h
index 76a3de6c4d..20da62d39f 100644
--- a/target/i386/kvm/xen-emu.h
+++ b/target/i386/kvm/xen-emu.h
@@ -25,5 +25,6 @@
int kvm_xen_init(KVMState *s, uint32_t hypercall_msr);
int kvm_xen_handle_exit(X86CPU *cpu, struct kvm_xen_exit *exit);
+int kvm_xen_set_vcpu_attr(CPUState *cs, uint16_t type, uint64_t gpa);
#endif /* QEMU_I386_KVM_XEN_EMU_H */
diff --git a/target/i386/machine.c b/target/i386/machine.c
index 310b125235..104cd6047c 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -1257,6 +1257,26 @@ static const VMStateDescription vmstate_nested_state = {
}
};
+static bool xen_vcpu_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return (env->xen_vcpu_info_gpa != UINT64_MAX ||
+ env->xen_vcpu_info_default_gpa != UINT64_MAX);
+}
+
+static const VMStateDescription vmstate_xen_vcpu = {
+ .name = "cpu/xen_vcpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = xen_vcpu_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.xen_vcpu_info_gpa, X86CPU),
+ VMSTATE_UINT64(env.xen_vcpu_info_default_gpa, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
#endif
static bool mcg_ext_ctl_needed(void *opaque)
@@ -1716,6 +1736,7 @@ const VMStateDescription vmstate_x86_cpu = {
#endif
#ifdef CONFIG_KVM
&vmstate_nested_state,
+ &vmstate_xen_vcpu,
#endif
&vmstate_msr_tsx_ctrl,
&vmstate_msr_intel_sgx,
--
2.35.3
next prev parent reply other threads:[~2022-12-16 0:50 UTC|newest]
Thread overview: 50+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-12-16 0:40 [RFC PATCH v3 00/38] Xen HVM support under KVM David Woodhouse
2022-12-16 0:40 ` [RFC PATCH v3 01/38] include: import xen public headers David Woodhouse
2022-12-20 14:14 ` Paul Durrant
2022-12-16 0:40 ` [RFC PATCH v3 02/38] xen: add CONFIG_XENFV_MACHINE and CONFIG_XEN_EMU options for Xen emulation David Woodhouse
2022-12-20 14:33 ` Paul Durrant
2022-12-16 0:40 ` [RFC PATCH v3 03/38] xen: Add XEN_DISABLED mode and make it default David Woodhouse
2022-12-20 14:39 ` Paul Durrant
2022-12-20 22:59 ` David Woodhouse
2022-12-21 9:49 ` Paul Durrant
2022-12-16 0:40 ` [RFC PATCH v3 04/38] i386/kvm: Add xen-version machine property and init KVM Xen support David Woodhouse
2022-12-20 14:47 ` Paul Durrant
2022-12-16 0:40 ` [RFC PATCH v3 05/38] i386/kvm: handle Xen HVM cpuid leaves David Woodhouse
2022-12-20 14:58 ` Paul Durrant
2022-12-16 0:40 ` [RFC PATCH v3 06/38] xen-platform: exclude vfio-pci from the PCI platform unplug David Woodhouse
2022-12-20 16:18 ` Paul Durrant
2022-12-16 0:40 ` [RFC PATCH v3 07/38] xen-platform: allow its creation with XEN_EMULATE mode David Woodhouse
2022-12-20 16:19 ` Paul Durrant
2022-12-20 16:29 ` David Woodhouse
2022-12-16 0:40 ` [RFC PATCH v3 08/38] hw/xen_backend: refactor xen_be_init() David Woodhouse
2022-12-20 16:22 ` Paul Durrant
2022-12-16 0:40 ` [RFC PATCH v3 09/38] i386/xen: handle guest hypercalls David Woodhouse
2022-12-16 0:40 ` [RFC PATCH v3 10/38] i386/xen: implement HYPERCALL_xen_version David Woodhouse
2022-12-16 0:40 ` [RFC PATCH v3 11/38] hw/xen: Add xen_overlay device for emulating shared xenheap pages David Woodhouse
2022-12-16 0:40 ` [RFC PATCH v3 12/38] i386/xen: add pc_machine_kvm_type to initialize XEN_EMULATE mode David Woodhouse
2022-12-16 0:40 ` [RFC PATCH v3 13/38] i386/xen: manage and save/restore Xen guest long_mode setting David Woodhouse
2022-12-16 0:40 ` [RFC PATCH v3 14/38] i386/xen: implement HYPERVISOR_memory_op David Woodhouse
2022-12-16 0:40 ` [RFC PATCH v3 15/38] i386/xen: implement XENMEM_add_to_physmap_batch David Woodhouse
2022-12-16 0:40 ` [RFC PATCH v3 16/38] i386/xen: implement HYPERVISOR_hvm_op David Woodhouse
2022-12-16 0:40 ` [RFC PATCH v3 17/38] i386/xen: implement HYPERVISOR_vcpu_op David Woodhouse
2022-12-16 0:40 ` David Woodhouse [this message]
2022-12-16 0:40 ` [RFC PATCH v3 19/38] i386/xen: handle VCPUOP_register_vcpu_time_info David Woodhouse
2022-12-16 0:40 ` [RFC PATCH v3 20/38] i386/xen: handle VCPUOP_register_runstate_memory_area David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 21/38] i386/xen: implement HVMOP_set_evtchn_upcall_vector David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 22/38] i386/xen: HVMOP_set_param / HVM_PARAM_CALLBACK_IRQ David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 23/38] i386/xen: implement HYPERVISOR_event_channel_op David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 24/38] i386/xen: implement HYPERVISOR_sched_op David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 25/38] hw/xen: Implement EVTCHNOP_status David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 26/38] hw/xen: Implement EVTCHNOP_close David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 27/38] hw/xen: Implement EVTCHNOP_unmask David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 28/38] hw/xen: Implement EVTCHNOP_bind_virq David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 29/38] hw/xen: Implement EVTCHNOP_bind_ipi David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 30/38] hw/xen: Implement EVTCHNOP_send David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 31/38] hw/xen: Implement EVTCHNOP_alloc_unbound David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 32/38] hw/xen: Implement EVTCHNOP_bind_interdomain David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 33/38] hw/xen: Implement EVTCHNOP_bind_vcpu David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 34/38] hw/xen: Implement EVTCHNOP_reset David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 35/38] i386/xen: add monitor commands to test event injection David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 36/38] i386/xen: Implement SCHEDOP_poll David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 37/38] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_GSI callback David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 38/38] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_PCI_INTX callback David Woodhouse
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20221216004117.862106-19-dwmw2@infradead.org \
--to=dwmw2@infradead.org \
--cc=alex.bennee@linaro.org \
--cc=ankur.a.arora@oracle.com \
--cc=cfontana@suse.de \
--cc=dgilbert@redhat.com \
--cc=joao.m.martins@oracle.com \
--cc=paul@xen.org \
--cc=pbonzini@redhat.com \
--cc=philmd@linaro.org \
--cc=qemu-devel@nongnu.org \
--cc=quintela@redhat.com \
--cc=thuth@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).