From: David Woodhouse <dwmw2@infradead.org>
To: Peter Maydell <peter.maydell@linaro.org>, qemu-devel@nongnu.org
Cc: "Paolo Bonzini" <pbonzini@redhat.com>,
"Paul Durrant" <paul@xen.org>,
"Joao Martins" <joao.m.martins@oracle.com>,
"Ankur Arora" <ankur.a.arora@oracle.com>,
"Philippe Mathieu-Daudé" <philmd@linaro.org>,
"Thomas Huth" <thuth@redhat.com>,
"Alex Bennée" <alex.bennee@linaro.org>,
"Juan Quintela" <quintela@redhat.com>,
"Dr . David Alan Gilbert" <dgilbert@redhat.com>,
"Claudio Fontana" <cfontana@suse.de>,
"Julien Grall" <julien@xen.org>,
"Michael S. Tsirkin" <mst@redhat.com>,
"Marcel Apfelbaum" <marcel.apfelbaum@gmail.com>,
armbru@redhat.com
Subject: [PATCH v10 32/59] hw/xen: Implement EVTCHNOP_bind_virq
Date: Wed, 1 Feb 2023 14:31:21 +0000
Message-ID: <20230201143148.1744093-33-dwmw2@infradead.org>
In-Reply-To: <20230201143148.1744093-1-dwmw2@infradead.org>
From: David Woodhouse <dwmw@amazon.co.uk>
Add an array of VIRQ ports to each vCPU so that we can deliver timers,
debug interrupts, etc. Global VIRQs are initially allocated against vCPU 0,
but can be migrated to other vCPUs (when we implement that).
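
For context, the guest-facing API being implemented here is the one from
the Xen public headers; a hypothetical guest-side sketch (the hypercall
wrapper name follows the usual guest convention and is illustrative, not
part of this patch):

    /* Bind VIRQ_TIMER on vCPU 0. On success, the emulation fills in
     * bind.port with the newly allocated event channel port. */
    struct evtchn_bind_virq bind = {
        .virq = VIRQ_TIMER,
        .vcpu = 0,
    };
    if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind) == 0) {
        /* bind.port can now be bound to an interrupt handler */
    }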

The kernel needs to know the port bound to VIRQ_TIMER in order to
accelerate timers, so tell it via the KVM_XEN_VCPU_ATTR_TYPE_TIMER vCPU
attribute. Also save/restore the singleshot timer deadline across
migration, as the kernel now handles the timer hypercalls automatically.
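
The hypercalls in question are the singleshot timer ones; a guest arms
that timer roughly as in the sketch below (hypothetical guest code;
now_ns stands for the current Xen system time in nanoseconds):

    /* Arm the per-vCPU singleshot timer. Once VIRQ_TIMER is known to
     * the kernel, KVM handles this hypercall itself; QEMU only needs
     * to save/restore the pending deadline across migration. */
    struct vcpu_set_singleshot_timer sst = {
        .timeout_abs_ns = now_ns + 100000,  /* fire 100us from now */
        .flags = 0,
    };
    HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, 0 /* vCPU */, &sst);
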
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
---
 hw/i386/kvm/xen_evtchn.c  | 85 ++++++++++++++++++++++++++++++++++++
 hw/i386/kvm/xen_evtchn.h  |  2 +
 include/sysemu/kvm_xen.h  |  1 +
 target/i386/cpu.h         |  4 ++
 target/i386/kvm/xen-emu.c | 91 +++++++++++++++++++++++++++++++++++++++
 target/i386/machine.c     |  2 +
 6 files changed, 185 insertions(+)
diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c
index deea7de027..da2f5711dd 100644
--- a/hw/i386/kvm/xen_evtchn.c
+++ b/hw/i386/kvm/xen_evtchn.c
@@ -244,6 +244,11 @@ static bool valid_port(evtchn_port_t port)
     }
 }
 
+static bool valid_vcpu(uint32_t vcpu)
+{
+    return !!qemu_get_cpu(vcpu);
+}
+
 int xen_evtchn_status_op(struct evtchn_status *status)
 {
     XenEvtchnState *s = xen_evtchn_singleton;
@@ -494,6 +499,43 @@ static void free_port(XenEvtchnState *s, evtchn_port_t port)
     clear_port_pending(s, port);
 }
 
+static int allocate_port(XenEvtchnState *s, uint32_t vcpu, uint16_t type,
+                         uint16_t val, evtchn_port_t *port)
+{
+    evtchn_port_t p = 1;
+
+    for (p = 1; valid_port(p); p++) {
+        if (s->port_table[p].type == EVTCHNSTAT_closed) {
+            s->port_table[p].vcpu = vcpu;
+            s->port_table[p].type = type;
+            s->port_table[p].type_val = val;
+
+            *port = p;
+
+            if (s->nr_ports < p + 1) {
+                s->nr_ports = p + 1;
+            }
+
+            return 0;
+        }
+    }
+    return -ENOSPC;
+}
+
+static bool virq_is_global(uint32_t virq)
+{
+    switch (virq) {
+    case VIRQ_TIMER:
+    case VIRQ_DEBUG:
+    case VIRQ_XENOPROF:
+    case VIRQ_XENPMU:
+        return false;
+
+    default:
+        return true;
+    }
+}
+
 static int close_port(XenEvtchnState *s, evtchn_port_t port)
 {
     XenEvtchnPort *p = &s->port_table[port];
@@ -502,6 +544,11 @@ static int close_port(XenEvtchnState *s, evtchn_port_t port)
     case EVTCHNSTAT_closed:
         return -ENOENT;
 
+    case EVTCHNSTAT_virq:
+        kvm_xen_set_vcpu_virq(virq_is_global(p->type_val) ? 0 : p->vcpu,
+                              p->type_val, 0);
+        break;
+
     default:
         break;
     }
@@ -553,3 +600,41 @@ int xen_evtchn_unmask_op(struct evtchn_unmask *unmask)
 
     return ret;
 }
+
+int xen_evtchn_bind_virq_op(struct evtchn_bind_virq *virq)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    int ret;
+
+    if (!s) {
+        return -ENOTSUP;
+    }
+
+    if (virq->virq >= NR_VIRQS) {
+        return -EINVAL;
+    }
+
+    /* Global VIRQ must be allocated on vCPU0 first */
+    if (virq_is_global(virq->virq) && virq->vcpu != 0) {
+        return -EINVAL;
+    }
+
+    if (!valid_vcpu(virq->vcpu)) {
+        return -ENOENT;
+    }
+
+    qemu_mutex_lock(&s->port_lock);
+
+    ret = allocate_port(s, virq->vcpu, EVTCHNSTAT_virq, virq->virq,
+                        &virq->port);
+    if (!ret) {
+        ret = kvm_xen_set_vcpu_virq(virq->vcpu, virq->virq, virq->port);
+        if (ret) {
+            free_port(s, virq->port);
+        }
+    }
+
+    qemu_mutex_unlock(&s->port_lock);
+
+    return ret;
+}
diff --git a/hw/i386/kvm/xen_evtchn.h b/hw/i386/kvm/xen_evtchn.h
index 69c6b0d743..0ea13dda3a 100644
--- a/hw/i386/kvm/xen_evtchn.h
+++ b/hw/i386/kvm/xen_evtchn.h
@@ -18,8 +18,10 @@ int xen_evtchn_set_callback_param(uint64_t param);
 struct evtchn_status;
 struct evtchn_close;
 struct evtchn_unmask;
+struct evtchn_bind_virq;
 int xen_evtchn_status_op(struct evtchn_status *status);
 int xen_evtchn_close_op(struct evtchn_close *close);
 int xen_evtchn_unmask_op(struct evtchn_unmask *unmask);
+int xen_evtchn_bind_virq_op(struct evtchn_bind_virq *virq);
 
 #endif /* QEMU_XEN_EVTCHN_H */
diff --git a/include/sysemu/kvm_xen.h b/include/sysemu/kvm_xen.h
index 0c0efbe699..297630cd87 100644
--- a/include/sysemu/kvm_xen.h
+++ b/include/sysemu/kvm_xen.h
@@ -23,6 +23,7 @@ int kvm_xen_soft_reset(void);
 uint32_t kvm_xen_get_caps(void);
 void *kvm_xen_get_vcpu_info_hva(uint32_t vcpu_id);
 void kvm_xen_inject_vcpu_callback_vector(uint32_t vcpu_id, int type);
+int kvm_xen_set_vcpu_virq(uint32_t vcpu_id, uint16_t virq, uint16_t port);
 
 #define kvm_xen_has_cap(cap) (!!(kvm_xen_get_caps() &           \
                                  KVM_XEN_HVM_CONFIG_ ## cap))
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index c9b12e7476..dba8732fc6 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -27,6 +27,8 @@
 #include "qapi/qapi-types-common.h"
 #include "qemu/cpu-float.h"
 
+#define XEN_NR_VIRQS 24
+
 /* The x86 has a strong memory model with some store-after-load re-ordering */
 #define TCG_GUEST_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
 
@@ -1795,6 +1797,8 @@ typedef struct CPUArchState {
     uint64_t xen_vcpu_time_info_gpa;
     uint64_t xen_vcpu_runstate_gpa;
     uint8_t xen_vcpu_callback_vector;
+    uint16_t xen_virq[XEN_NR_VIRQS];
+    uint64_t xen_singleshot_timer_ns;
 #endif
 #if defined(CONFIG_HVF)
     HVFX86LazyFlags hvf_lflags;
diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c
index 418028b04f..0c4988ad63 100644
--- a/target/i386/kvm/xen-emu.c
+++ b/target/i386/kvm/xen-emu.c
@@ -352,6 +352,53 @@ void kvm_xen_inject_vcpu_callback_vector(uint32_t vcpu_id, int type)
     }
 }
 
+static int kvm_xen_set_vcpu_timer(CPUState *cs)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    struct kvm_xen_vcpu_attr va = {
+        .type = KVM_XEN_VCPU_ATTR_TYPE_TIMER,
+        .u.timer.port = env->xen_virq[VIRQ_TIMER],
+        .u.timer.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
+        .u.timer.expires_ns = env->xen_singleshot_timer_ns,
+    };
+
+    return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &va);
+}
+
+static void do_set_vcpu_timer_virq(CPUState *cs, run_on_cpu_data data)
+{
+    kvm_xen_set_vcpu_timer(cs);
+}
+
+int kvm_xen_set_vcpu_virq(uint32_t vcpu_id, uint16_t virq, uint16_t port)
+{
+    CPUState *cs = qemu_get_cpu(vcpu_id);
+
+    if (!cs) {
+        return -ENOENT;
+    }
+
+    /* cpu.h doesn't include the actual Xen header. */
+    qemu_build_assert(NR_VIRQS == XEN_NR_VIRQS);
+
+    if (virq >= NR_VIRQS) {
+        return -EINVAL;
+    }
+
+    if (port && X86_CPU(cs)->env.xen_virq[virq]) {
+        return -EEXIST;
+    }
+
+    X86_CPU(cs)->env.xen_virq[virq] = port;
+    if (virq == VIRQ_TIMER && kvm_xen_has_cap(EVTCHN_SEND)) {
+        async_run_on_cpu(cs, do_set_vcpu_timer_virq,
+                         RUN_ON_CPU_HOST_INT(port));
+    }
+    return 0;
+}
+
 static void do_set_vcpu_time_info_gpa(CPUState *cs, run_on_cpu_data data)
 {
     X86CPU *cpu = X86_CPU(cs);
@@ -384,6 +431,8 @@ static void do_vcpu_soft_reset(CPUState *cs, run_on_cpu_data data)
     env->xen_vcpu_time_info_gpa = INVALID_GPA;
    env->xen_vcpu_runstate_gpa = INVALID_GPA;
     env->xen_vcpu_callback_vector = 0;
+    env->xen_singleshot_timer_ns = 0;
+    memset(env->xen_virq, 0, sizeof(env->xen_virq));
 
     set_vcpu_info(cs, INVALID_GPA);
     kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO,
@@ -392,6 +441,7 @@ static void do_vcpu_soft_reset(CPUState *cs, run_on_cpu_data data)
                           INVALID_GPA);
     if (kvm_xen_has_cap(EVTCHN_SEND)) {
         kvm_xen_set_vcpu_callback_vector(cs);
+        kvm_xen_set_vcpu_timer(cs);
     }
 }
 
@@ -826,6 +876,21 @@ static bool kvm_xen_hcall_evtchn_op(struct kvm_xen_exit *exit, X86CPU *cpu,
         err = xen_evtchn_unmask_op(&unmask);
         break;
     }
+    case EVTCHNOP_bind_virq: {
+        struct evtchn_bind_virq virq;
+
+        qemu_build_assert(sizeof(virq) == 12);
+        if (kvm_copy_from_gva(cs, arg, &virq, sizeof(virq))) {
+            err = -EFAULT;
+            break;
+        }
+
+        err = xen_evtchn_bind_virq_op(&virq);
+        if (!err && kvm_copy_to_gva(cs, arg, &virq, sizeof(virq))) {
+            err = -EFAULT;
+        }
+        break;
+    }
     default:
         return false;
     }
@@ -1057,6 +1122,12 @@ int kvm_put_xen_state(CPUState *cs)
         }
     }
 
+    if (env->xen_virq[VIRQ_TIMER]) {
+        ret = kvm_xen_set_vcpu_timer(cs);
+        if (ret < 0) {
+            return ret;
+        }
+    }
     return 0;
 }
 
@@ -1065,6 +1136,7 @@ int kvm_get_xen_state(CPUState *cs)
     X86CPU *cpu = X86_CPU(cs);
     CPUX86State *env = &cpu->env;
     uint64_t gpa;
+    int ret;
 
     /*
      * The kernel does not mark vcpu_info as dirty when it delivers interrupts
@@ -1086,5 +1158,24 @@ int kvm_get_xen_state(CPUState *cs)
         }
     }
 
+    if (!kvm_xen_has_cap(EVTCHN_SEND)) {
+        return 0;
+    }
+
+    /*
+     * If the kernel is accelerating timers, read out the current value of the
+     * singleshot timer deadline.
+     */
+    if (env->xen_virq[VIRQ_TIMER]) {
+        struct kvm_xen_vcpu_attr va = {
+            .type = KVM_XEN_VCPU_ATTR_TYPE_TIMER,
+        };
+        ret = kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_GET_ATTR, &va);
+        if (ret < 0) {
+            return ret;
+        }
+        env->xen_singleshot_timer_ns = va.u.timer.expires_ns;
+    }
+
     return 0;
 }
diff --git a/target/i386/machine.c b/target/i386/machine.c
index a4874eda90..603a1077e3 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -1275,6 +1275,8 @@ static const VMStateDescription vmstate_xen_vcpu = {
         VMSTATE_UINT64(env.xen_vcpu_time_info_gpa, X86CPU),
         VMSTATE_UINT64(env.xen_vcpu_runstate_gpa, X86CPU),
         VMSTATE_UINT8(env.xen_vcpu_callback_vector, X86CPU),
+        VMSTATE_UINT16_ARRAY(env.xen_virq, X86CPU, XEN_NR_VIRQS),
+        VMSTATE_UINT64(env.xen_singleshot_timer_ns, X86CPU),
         VMSTATE_END_OF_LIST()
     }
 };
--
2.39.0