From: Paolo Bonzini <pbonzini@redhat.com>
To: qemu-devel@nongnu.org
Cc: Joao Martins <joao.m.martins@oracle.com>,
David Woodhouse <dwmw@amazon.co.uk>, Paul Durrant <paul@xen.org>
Subject: [PULL 21/62] i386/xen: handle VCPUOP_register_vcpu_info
Date: Thu, 2 Mar 2023 13:29:48 +0100
Message-ID: <20230302123029.153265-22-pbonzini@redhat.com>
In-Reply-To: <20230302123029.153265-1-pbonzini@redhat.com>
From: Joao Martins <joao.m.martins@oracle.com>

Handle the hypercall to set a per-vCPU vcpu_info, and also wire up the default
vcpu_info area in the shared_info page for the first 32 vCPUs.
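
For reference, the guest side of this hypercall looks roughly like the sketch
below. This is not part of the patch; the struct layout follows the public Xen
headers, and virt_to_gfn()/offset_in_page() stand in for whatever helpers the
guest kernel uses to describe the page holding its per-CPU vcpu_info:

    /* Guest-side sketch: register a vcpu_info that lives inside a guest page. */
    struct vcpu_register_vcpu_info info = {
        .mfn    = virt_to_gfn(&this_cpu_vcpu_info),     /* guest frame number */
        .offset = offset_in_page(&this_cpu_vcpu_info),  /* offset within it   */
    };
    rc = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, vcpu_id, &info);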

To avoid deadlock within KVM, a vCPU thread must set its *own* vcpu_info
rather than having it set from the context in which the hypercall is
invoked.
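
In outline, the pattern used below defers the KVM_XEN_VCPU_SET_ATTR ioctl to
the target vCPU's own thread via async_run_on_cpu(); a condensed sketch of the
code added by this patch:

    /* Runs on the target vCPU's own thread. */
    static void do_set_vcpu_info_gpa(CPUState *cs, run_on_cpu_data data)
    {
        X86CPU *cpu = X86_CPU(cs);

        cpu->env.xen_vcpu_info_gpa = data.host_ulong;
        kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
                              data.host_ulong);
    }

    /* In the hypercall handler: queue the work, don't issue the ioctl here. */
    async_run_on_cpu(target, do_set_vcpu_info_gpa, RUN_ON_CPU_HOST_ULONG(gpa));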

Add the vcpu_info (and default) GPA to vmstate_x86_cpu for migration, and
restore it appropriately in kvm_arch_put_registers().
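
A sketch of that restore path as added below: at KVM_PUT_FULL_STATE,
kvm_put_xen_state() re-registers whichever GPA is in effect, preferring an
explicitly registered vcpu_info over the default slot in shared_info:

    uint64_t gpa = env->xen_vcpu_info_gpa;
    if (gpa == INVALID_GPA) {
        gpa = env->xen_vcpu_info_default_gpa;   /* fall back to shared_info slot */
    }
    if (gpa != INVALID_GPA) {
        ret = kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, gpa);
    }
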
Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Paul Durrant <paul@xen.org>
---
target/i386/cpu.h | 2 +
target/i386/kvm/kvm.c | 17 ++++
target/i386/kvm/trace-events | 1 +
target/i386/kvm/xen-emu.c | 153 ++++++++++++++++++++++++++++++++++-
target/i386/kvm/xen-emu.h | 2 +
target/i386/machine.c | 19 +++++
6 files changed, 191 insertions(+), 3 deletions(-)
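
For orientation, the default vcpu_info placement set up in
xen_set_shared_info() below simply indexes into the shared_info page, one
64-byte slot per vCPU (shinfo_gfn here names the gfn passed to that function):

    /* Default vcpu_info GPA for vCPU i lives inside the shared_info page.    */
    gpa = (shinfo_gfn << TARGET_PAGE_BITS) + i * sizeof(vcpu_info_t);
    /* sizeof(struct vcpu_info) == 64, so the 32 legacy vCPUs fit in one page. */
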
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 5069adfbe7e0..9070efdc5184 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1799,6 +1799,8 @@ typedef struct CPUArchState {
#endif
#if defined(CONFIG_KVM)
struct kvm_nested_state *nested_state;
+ uint64_t xen_vcpu_info_gpa;
+ uint64_t xen_vcpu_info_default_gpa;
#endif
#if defined(CONFIG_HVF)
HVFX86LazyFlags hvf_lflags;
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index f43e5531bf85..5a144ec0de3a 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -4750,6 +4750,15 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
kvm_arch_set_tsc_khz(cpu);
}
+#ifdef CONFIG_XEN_EMU
+ if (xen_mode == XEN_EMULATE && level == KVM_PUT_FULL_STATE) {
+ ret = kvm_put_xen_state(cpu);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+#endif
+
ret = kvm_getput_regs(x86_cpu, 1);
if (ret < 0) {
return ret;
@@ -4849,6 +4858,14 @@ int kvm_arch_get_registers(CPUState *cs)
if (ret < 0) {
goto out;
}
+#ifdef CONFIG_XEN_EMU
+ if (xen_mode == XEN_EMULATE) {
+ ret = kvm_get_xen_state(cs);
+ if (ret < 0) {
+ goto out;
+ }
+ }
+#endif
ret = 0;
out:
cpu_sync_bndcs_hflags(&cpu->env);
diff --git a/target/i386/kvm/trace-events b/target/i386/kvm/trace-events
index 8e9f269f5671..a840e0333de7 100644
--- a/target/i386/kvm/trace-events
+++ b/target/i386/kvm/trace-events
@@ -10,3 +10,4 @@ kvm_x86_update_msi_routes(int num) "Updated %d MSI routes"
kvm_xen_hypercall(int cpu, uint8_t cpl, uint64_t input, uint64_t a0, uint64_t a1, uint64_t a2, uint64_t ret) "xen_hypercall: cpu %d cpl %d input %" PRIu64 " a0 0x%" PRIx64 " a1 0x%" PRIx64 " a2 0x%" PRIx64" ret 0x%" PRIx64
kvm_xen_soft_reset(void) ""
kvm_xen_set_shared_info(uint64_t gfn) "shared info at gfn 0x%" PRIx64
+kvm_xen_set_vcpu_attr(int cpu, int type, uint64_t gpa) "vcpu attr cpu %d type %d gpa 0x%" PRIx64
diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c
index e5ae0a9a386d..30b4789da394 100644
--- a/target/i386/kvm/xen-emu.c
+++ b/target/i386/kvm/xen-emu.c
@@ -119,6 +119,8 @@ int kvm_xen_init(KVMState *s, uint32_t hypercall_msr)
int kvm_xen_init_vcpu(CPUState *cs)
{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
int err;
/*
@@ -142,6 +144,9 @@ int kvm_xen_init_vcpu(CPUState *cs)
}
}
+ env->xen_vcpu_info_gpa = INVALID_GPA;
+ env->xen_vcpu_info_default_gpa = INVALID_GPA;
+
return 0;
}
@@ -187,10 +192,58 @@ static bool kvm_xen_hcall_xen_version(struct kvm_xen_exit *exit, X86CPU *cpu,
return true;
}
+static int kvm_xen_set_vcpu_attr(CPUState *cs, uint16_t type, uint64_t gpa)
+{
+ struct kvm_xen_vcpu_attr xhsi;
+
+ xhsi.type = type;
+ xhsi.u.gpa = gpa;
+
+ trace_kvm_xen_set_vcpu_attr(cs->cpu_index, type, gpa);
+
+ return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &xhsi);
+}
+
+static void do_set_vcpu_info_default_gpa(CPUState *cs, run_on_cpu_data data)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ env->xen_vcpu_info_default_gpa = data.host_ulong;
+
+ /* Changing the default does nothing if a vcpu_info was explicitly set. */
+ if (env->xen_vcpu_info_gpa == INVALID_GPA) {
+ kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
+ env->xen_vcpu_info_default_gpa);
+ }
+}
+
+static void do_set_vcpu_info_gpa(CPUState *cs, run_on_cpu_data data)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ env->xen_vcpu_info_gpa = data.host_ulong;
+
+ kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
+ env->xen_vcpu_info_gpa);
+}
+
+static void do_vcpu_soft_reset(CPUState *cs, run_on_cpu_data data)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ env->xen_vcpu_info_gpa = INVALID_GPA;
+ env->xen_vcpu_info_default_gpa = INVALID_GPA;
+
+ kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, INVALID_GPA);
+}
+
static int xen_set_shared_info(uint64_t gfn)
{
uint64_t gpa = gfn << TARGET_PAGE_BITS;
- int err;
+ int i, err;
QEMU_IOTHREAD_LOCK_GUARD();
@@ -207,6 +260,15 @@ static int xen_set_shared_info(uint64_t gfn)
trace_kvm_xen_set_shared_info(gfn);
+ for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++) {
+ CPUState *cpu = qemu_get_cpu(i);
+ if (cpu) {
+ async_run_on_cpu(cpu, do_set_vcpu_info_default_gpa,
+ RUN_ON_CPU_HOST_ULONG(gpa));
+ }
+ gpa += sizeof(vcpu_info_t);
+ }
+
return err;
}
@@ -364,15 +426,43 @@ static bool kvm_xen_hcall_hvm_op(struct kvm_xen_exit *exit, X86CPU *cpu,
}
}
+static int vcpuop_register_vcpu_info(CPUState *cs, CPUState *target,
+ uint64_t arg)
+{
+ struct vcpu_register_vcpu_info rvi;
+ uint64_t gpa;
+
+ /* No need for 32/64 compat handling */
+ qemu_build_assert(sizeof(rvi) == 16);
+ qemu_build_assert(sizeof(struct vcpu_info) == 64);
+
+ if (!target) {
+ return -ENOENT;
+ }
+
+ if (kvm_copy_from_gva(cs, arg, &rvi, sizeof(rvi))) {
+ return -EFAULT;
+ }
+
+ if (rvi.offset > TARGET_PAGE_SIZE - sizeof(struct vcpu_info)) {
+ return -EINVAL;
+ }
+
+ gpa = ((rvi.mfn << TARGET_PAGE_BITS) + rvi.offset);
+ async_run_on_cpu(target, do_set_vcpu_info_gpa, RUN_ON_CPU_HOST_ULONG(gpa));
+ return 0;
+}
+
static bool kvm_xen_hcall_vcpu_op(struct kvm_xen_exit *exit, X86CPU *cpu,
int cmd, int vcpu_id, uint64_t arg)
{
+ CPUState *dest = qemu_get_cpu(vcpu_id);
+ CPUState *cs = CPU(cpu);
int err;
switch (cmd) {
case VCPUOP_register_vcpu_info:
- /* no vcpu info placement for now */
- err = -ENOSYS;
+ err = vcpuop_register_vcpu_info(cs, dest, arg);
break;
default:
@@ -385,12 +475,17 @@ static bool kvm_xen_hcall_vcpu_op(struct kvm_xen_exit *exit, X86CPU *cpu,
int kvm_xen_soft_reset(void)
{
+ CPUState *cpu;
int err;
assert(qemu_mutex_iothread_locked());
trace_kvm_xen_soft_reset();
+ CPU_FOREACH(cpu) {
+ async_run_on_cpu(cpu, do_vcpu_soft_reset, RUN_ON_CPU_NULL);
+ }
+
err = xen_overlay_map_shinfo_page(INVALID_GFN);
if (err) {
return err;
@@ -539,3 +634,55 @@ int kvm_xen_handle_exit(X86CPU *cpu, struct kvm_xen_exit *exit)
exit->u.hcall.result);
return 0;
}
+
+int kvm_put_xen_state(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ uint64_t gpa;
+ int ret;
+
+ gpa = env->xen_vcpu_info_gpa;
+ if (gpa == INVALID_GPA) {
+ gpa = env->xen_vcpu_info_default_gpa;
+ }
+
+ if (gpa != INVALID_GPA) {
+ ret = kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, gpa);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int kvm_get_xen_state(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ uint64_t gpa;
+
+ /*
+ * The kernel does not mark vcpu_info as dirty when it delivers interrupts
+ * to it. It's up to userspace to *assume* that any page shared thus is
+ * always considered dirty. The shared_info page is different since it's
+ * an overlay and migrated separately anyway.
+ */
+ gpa = env->xen_vcpu_info_gpa;
+ if (gpa == INVALID_GPA) {
+ gpa = env->xen_vcpu_info_default_gpa;
+ }
+ if (gpa != INVALID_GPA) {
+ MemoryRegionSection mrs = memory_region_find(get_system_memory(),
+ gpa,
+ sizeof(struct vcpu_info));
+ if (mrs.mr &&
+ !int128_lt(mrs.size, int128_make64(sizeof(struct vcpu_info)))) {
+ memory_region_set_dirty(mrs.mr, mrs.offset_within_region,
+ sizeof(struct vcpu_info));
+ }
+ }
+
+ return 0;
+}
diff --git a/target/i386/kvm/xen-emu.h b/target/i386/kvm/xen-emu.h
index 21faf6bf38fd..452605699abb 100644
--- a/target/i386/kvm/xen-emu.h
+++ b/target/i386/kvm/xen-emu.h
@@ -26,5 +26,7 @@
int kvm_xen_init(KVMState *s, uint32_t hypercall_msr);
int kvm_xen_init_vcpu(CPUState *cs);
int kvm_xen_handle_exit(X86CPU *cpu, struct kvm_xen_exit *exit);
+int kvm_put_xen_state(CPUState *cs);
+int kvm_get_xen_state(CPUState *cs);
#endif /* QEMU_I386_KVM_XEN_EMU_H */
diff --git a/target/i386/machine.c b/target/i386/machine.c
index 310b12523508..1215e616c8f4 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -6,8 +6,10 @@
#include "kvm/hyperv.h"
#include "hw/i386/x86.h"
#include "kvm/kvm_i386.h"
+#include "hw/xen/xen.h"
#include "sysemu/kvm.h"
+#include "sysemu/kvm_xen.h"
#include "sysemu/tcg.h"
#include "qemu/error-report.h"
@@ -1257,6 +1259,22 @@ static const VMStateDescription vmstate_nested_state = {
}
};
+static bool xen_vcpu_needed(void *opaque)
+{
+ return (xen_mode == XEN_EMULATE);
+}
+
+static const VMStateDescription vmstate_xen_vcpu = {
+ .name = "cpu/xen_vcpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = xen_vcpu_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.xen_vcpu_info_gpa, X86CPU),
+ VMSTATE_UINT64(env.xen_vcpu_info_default_gpa, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
#endif
static bool mcg_ext_ctl_needed(void *opaque)
@@ -1716,6 +1734,7 @@ const VMStateDescription vmstate_x86_cpu = {
#endif
#ifdef CONFIG_KVM
&vmstate_nested_state,
+ &vmstate_xen_vcpu,
#endif
&vmstate_msr_tsx_ctrl,
&vmstate_msr_intel_sgx,
--
2.39.1