From: David Woodhouse <dwmw2@infradead.org>
To: qemu-devel@nongnu.org
Cc: "Paolo Bonzini" <pbonzini@redhat.com>,
"Paul Durrant" <paul@xen.org>,
"Joao Martins" <joao.m.martins@oracle.com>,
"Ankur Arora" <ankur.a.arora@oracle.com>,
"Philippe Mathieu-Daudé" <philmd@linaro.org>,
"Thomas Huth" <thuth@redhat.com>,
"Alex Bennée" <alex.bennee@linaro.org>,
"Juan Quintela" <quintela@redhat.com>,
"Dr . David Alan Gilbert" <dgilbert@redhat.com>,
"Claudio Fontana" <cfontana@suse.de>,
"Julien Grall" <julien@xen.org>,
"Michael S. Tsirkin" <mst@redhat.com>,
"Marcel Apfelbaum" <marcel.apfelbaum@gmail.com>,
armbru@redhat.com
Subject: [RFC PATCH v7bis 12/19] hw/xen: Add backend implementation of grant table operations
Date: Mon, 16 Jan 2023 22:19:12 +0000 [thread overview]
Message-ID: <20230116221919.1124201-13-dwmw2@infradead.org> (raw)
In-Reply-To: <20230116221919.1124201-1-dwmw2@infradead.org>
From: David Woodhouse <dwmw@amazon.co.uk>
This is limited to mapping a single grant at a time, because under Xen the
pages are mapped *contiguously* into qemu's address space, and that's very
hard to do when those pages actually come from anonymous mappings in qemu
in the first place.
Eventually perhaps we can look at using shared mappings of actual objects
for system RAM, and then we can make new mappings of the same backing
store (be it deleted files, shmem, whatever). But for now let's stick to
a page at a time.
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
---
hw/i386/kvm/xen_gnttab.c | 294 ++++++++++++++++++++++++++++++++++++++-
1 file changed, 291 insertions(+), 3 deletions(-)
diff --git a/hw/i386/kvm/xen_gnttab.c b/hw/i386/kvm/xen_gnttab.c
index 5e8fc0184e..8008a391bd 100644
--- a/hw/i386/kvm/xen_gnttab.c
+++ b/hw/i386/kvm/xen_gnttab.c
@@ -21,6 +21,7 @@
#include "hw/sysbus.h"
#include "hw/xen/xen.h"
+#include "hw/xen/xen_backend_ops.h"
#include "xen_overlay.h"
#include "xen_gnttab.h"
@@ -33,12 +34,11 @@
#define TYPE_XEN_GNTTAB "xen-gnttab"
OBJECT_DECLARE_SIMPLE_TYPE(XenGnttabState, XEN_GNTTAB)
-#define XEN_PAGE_SHIFT 12
-#define XEN_PAGE_SIZE (1ULL << XEN_PAGE_SHIFT)
-
#define ENTRIES_PER_FRAME_V1 (XEN_PAGE_SIZE / sizeof(grant_entry_v1_t))
#define ENTRIES_PER_FRAME_V2 (XEN_PAGE_SIZE / sizeof(grant_entry_v2_t))
+static struct gnttab_backend_ops emu_gnttab_backend_ops;
+
struct XenGnttabState {
/*< private >*/
SysBusDevice busdev;
@@ -57,6 +57,8 @@ struct XenGnttabState {
MemoryRegion gnt_frames;
MemoryRegion *gnt_aliases;
uint64_t *gnt_frame_gpas;
+
+ uint8_t *map_track;
};
struct XenGnttabState *xen_gnttab_singleton;
@@ -88,9 +90,15 @@ static void xen_gnttab_realize(DeviceState *dev, Error **errp)
s->gnt_frame_gpas[i] = INVALID_GPA;
}
+ s->entries.v1[GNTTAB_RESERVED_XENSTORE].flags = GTF_permit_access;
+ s->entries.v1[GNTTAB_RESERVED_XENSTORE].frame = XEN_SPECIAL_PFN(XENSTORE);
qemu_mutex_init(&s->gnt_lock);
xen_gnttab_singleton = s;
+
+ s->map_track = g_new0(uint8_t, s->max_frames * ENTRIES_PER_FRAME_V1);
+
+ xen_gnttab_ops = &emu_gnttab_backend_ops;
}
static int xen_gnttab_post_load(void *opaque, int version_id)
@@ -236,3 +244,283 @@ int xen_gnttab_query_size_op(struct gnttab_query_size *size)
size->max_nr_frames = s->max_frames;
return 0;
}
+
+/* Track per-open refs, to allow close() to clean up. */
+struct active_ref {
+ MemoryRegionSection mrs;
+ void *virtaddr;
+ uint32_t refcnt;
+ int prot;
+};
+
+static void gnt_unref(XenGnttabState *s, grant_ref_t ref,
+ MemoryRegionSection *mrs, int prot)
+{
+ if (mrs && mrs->mr) {
+ if (prot & PROT_WRITE) {
+ memory_region_set_dirty(mrs->mr, mrs->offset_within_region,
+ XEN_PAGE_SIZE);
+ }
+ memory_region_unref(mrs->mr);
+ mrs->mr = NULL;
+ }
+ assert(s->map_track[ref] != 0);
+
+ if (--s->map_track[ref] == 0) {
+ grant_entry_v1_t *gnt_p = &s->entries.v1[ref];
+ qatomic_and(&gnt_p->flags, ~(GTF_reading | GTF_writing));
+ }
+}
+
+static uint64_t gnt_ref(XenGnttabState *s, grant_ref_t ref, int prot)
+{
+ uint16_t mask = GTF_type_mask | GTF_sub_page;
+ volatile grant_entry_v1_t *gnt_p;
+ grant_entry_v1_t gnt;
+ int retries = 0;
+
+ if (ref >= s->max_frames * ENTRIES_PER_FRAME_V1 ||
+ s->map_track[ref] == UINT8_MAX) {
+ return INVALID_GPA;
+ }
+
+ if (prot & PROT_WRITE) {
+ mask |= GTF_readonly;
+ }
+
+ gnt_p = &s->entries.v1[ref];
+
+ /*
+ * The guest can legitimately be changing the GTF_readonly flag. Allow
+ * that, but don't let a malicious guest cause a livelock.
+ */
+ for (retries = 0; retries < 5; retries++) {
+ uint16_t new_flags;
+ gnt = *gnt_p;
+
+ if ((gnt.flags & mask) != GTF_permit_access ||
+ gnt.domid != DOMID_QEMU) {
+ return INVALID_GPA;
+ }
+
+ new_flags = gnt.flags | GTF_reading;
+ if (prot & PROT_WRITE) {
+ new_flags |= GTF_writing;
+ }
+
+ if (qatomic_cmpxchg(&gnt_p->flags, gnt.flags, new_flags) == gnt.flags) {
+ return (uint64_t)gnt.frame << XEN_PAGE_SHIFT;
+ }
+ }
+
+ return INVALID_GPA;
+}
+
+struct xengntdev_handle {
+ GHashTable *active_maps;
+};
+
+static int xen_be_gnttab_set_max_grants(struct xengntdev_handle *xgt,
+ uint32_t nr_grants)
+{
+ return 0;
+}
+
+static void *xen_be_gnttab_map_refs(struct xengntdev_handle *xgt, uint32_t count,
+ uint32_t domid, uint32_t *refs, int prot)
+{
+ XenGnttabState *s = xen_gnttab_singleton;
+ struct active_ref *act;
+
+ if (!s) {
+ errno = ENOTSUP;
+ return NULL;
+ }
+
+ if (domid != xen_domid) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ if (!count || count > 4096) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ /*
+ * Making a contiguous mapping from potentially discontiguous grant
+ * references would be... distinctly non-trivial. We don't support it.
+ * Even changing the API to return an array of pointers, one per page,
+ * wouldn't be simple to use in PV backends because some structures
+ * actually cross page boundaries (e.g. 32-bit blkif_response ring
+ * entries are 12 bytes).
+ */
+ if (count != 1) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ QEMU_LOCK_GUARD(&s->gnt_lock);
+
+ act = g_hash_table_lookup(xgt->active_maps, GINT_TO_POINTER(refs[0]));
+ if (act) {
+ if ((prot & PROT_WRITE) && !(act->prot & PROT_WRITE)) {
+ if (gnt_ref(s, refs[0], prot) == INVALID_GPA) {
+ return NULL;
+ }
+ act->prot |= PROT_WRITE;
+ }
+ act->refcnt++;
+ } else {
+ uint64_t gpa = gnt_ref(s, refs[0], prot);
+ if (gpa == INVALID_GPA) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ act = g_new0(struct active_ref, 1);
+ act->prot = prot;
+ act->refcnt = 1;
+ act->mrs = memory_region_find(get_system_memory(), gpa, XEN_PAGE_SIZE);
+
+ if (!act->mrs.mr || act->mrs.size < XEN_PAGE_SIZE ||
+ memory_region_get_ram_addr(act->mrs.mr) == RAM_ADDR_INVALID ||
+ !(act->virtaddr = qemu_map_ram_ptr(act->mrs.mr->ram_block,
+ act->mrs.offset_within_region))) {
+ gnt_unref(s, refs[0], &act->mrs, 0);
+ g_free(act);
+ errno = EINVAL;
+ return NULL;
+ }
+
+ s->map_track[refs[0]]++;
+ g_hash_table_insert(xgt->active_maps, GINT_TO_POINTER(refs[0]), act);
+ }
+
+ return act->virtaddr;
+}
+
+static gboolean do_unmap(gpointer key, gpointer value, gpointer user_data)
+{
+ XenGnttabState *s = user_data;
+ grant_ref_t gref = GPOINTER_TO_INT(key);
+ struct active_ref *act = value;
+
+ gnt_unref(s, gref, &act->mrs, act->prot);
+ g_free(act);
+ return true;
+}
+
+static int xen_be_gnttab_unmap(struct xengntdev_handle *xgt,
+ void *start_address, uint32_t *refs,
+ uint32_t count)
+{
+ XenGnttabState *s = xen_gnttab_singleton;
+ struct active_ref *act;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ if (count != 1) {
+ return -EINVAL;
+ }
+
+ QEMU_LOCK_GUARD(&s->gnt_lock);
+
+ act = g_hash_table_lookup(xgt->active_maps, GINT_TO_POINTER(refs[0]));
+ if (!act) {
+ return -ENOENT;
+ }
+
+ if (act->virtaddr != start_address) {
+ return -EINVAL;
+ }
+
+ if (!--act->refcnt) {
+ do_unmap(GINT_TO_POINTER(refs[0]), act, s);
+ g_hash_table_remove(xgt->active_maps, GINT_TO_POINTER(refs[0]));
+ }
+
+ return 0;
+}
+
+/*
+ * This looks a bit like the one for true Xen in xen-operations.c but
+ * in emulation we don't support multi-page mappings. And under Xen we
+ * *want* the multi-page mappings so we have fewer bounces through the
+ * kernel and the hypervisor. So the code paths end up being similar,
+ * but different.
+ */
+static int xen_be_gnttab_copy(struct xengntdev_handle *xgt, bool to_domain,
+ uint32_t domid, XenGrantCopySegment *segs,
+ uint32_t nr_segs, Error **errp)
+{
+ int prot = to_domain ? PROT_WRITE : PROT_READ;
+ unsigned int i;
+
+ for (i = 0; i < nr_segs; i++) {
+ XenGrantCopySegment *seg = &segs[i];
+ void *page;
+ uint32_t ref = to_domain ? seg->dest.foreign.ref :
+ seg->source.foreign.ref;
+
+ page = xen_be_gnttab_map_refs(xgt, 1, domid, &ref, prot);
+ if (!page) {
+ if (errp) {
+ error_setg_errno(errp, errno,
+ "xen_be_gnttab_map_refs failed");
+ }
+ return -errno;
+ }
+
+ if (to_domain) {
+ memcpy(page + seg->dest.foreign.offset, seg->source.virt,
+ seg->len);
+ } else {
+ memcpy(seg->dest.virt, page + seg->source.foreign.offset,
+ seg->len);
+ }
+
+ if (xen_be_gnttab_unmap(xgt, page, &ref, 1)) {
+ if (errp) {
+ error_setg_errno(errp, errno, "xen_be_gnttab_unmap failed");
+ }
+ return -errno;
+ }
+ }
+
+ return 0;
+}
+
+static struct xengntdev_handle *xen_be_gnttab_open(void)
+{
+ struct xengntdev_handle *xgt = g_new0(struct xengntdev_handle, 1);
+
+ xgt->active_maps = g_hash_table_new(g_direct_hash, g_direct_equal);
+ return xgt;
+}
+
+static int xen_be_gnttab_close(struct xengntdev_handle *xgt)
+{
+ XenGnttabState *s = xen_gnttab_singleton;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ g_hash_table_foreach_remove(xgt->active_maps, do_unmap, s);
+ g_hash_table_destroy(xgt->active_maps);
+ g_free(xgt);
+ return 0;
+}
+
+static struct gnttab_backend_ops emu_gnttab_backend_ops = {
+ .open = xen_be_gnttab_open,
+ .close = xen_be_gnttab_close,
+ .grant_copy = xen_be_gnttab_copy,
+ .set_max_grants = xen_be_gnttab_set_max_grants,
+ .map_refs = xen_be_gnttab_map_refs,
+ .unmap = xen_be_gnttab_unmap,
+};
+
--
2.39.0
next prev parent reply other threads:[~2023-01-16 22:21 UTC|newest]
Thread overview: 97+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-01-16 21:57 [PATCH v7 00/51] Xen support under KVM David Woodhouse
2023-01-16 21:57 ` [PATCH v7 01/51] include: import Xen public headers to include/standard-headers/ David Woodhouse
2023-01-19 13:02 ` Alex Bennée
2023-01-19 15:06 ` David Woodhouse
2023-01-16 21:57 ` [PATCH v7 02/51] xen: add CONFIG_XENFV_MACHINE and CONFIG_XEN_EMU options for Xen emulation David Woodhouse
2023-01-16 21:57 ` [PATCH v7 03/51] xen: Add XEN_DISABLED mode and make it default David Woodhouse
2023-01-16 21:57 ` [PATCH v7 04/51] i386/kvm: Add xen-version KVM accelerator property and init KVM Xen support David Woodhouse
2023-01-16 21:57 ` [PATCH v7 05/51] i386/kvm: handle Xen HVM cpuid leaves David Woodhouse
2023-01-16 21:57 ` [PATCH v7 06/51] i386/hvm: Set Xen vCPU ID in KVM David Woodhouse
2023-01-16 21:57 ` [PATCH v7 07/51] xen-platform: exclude vfio-pci from the PCI platform unplug David Woodhouse
2023-01-16 21:57 ` [PATCH v7 08/51] xen-platform: allow its creation with XEN_EMULATE mode David Woodhouse
2023-01-17 9:44 ` Paul Durrant
2023-01-16 21:57 ` [PATCH v7 09/51] i386/xen: handle guest hypercalls David Woodhouse
2023-01-16 21:57 ` [PATCH v7 10/51] i386/xen: implement HYPERVISOR_xen_version David Woodhouse
2023-01-16 21:57 ` [PATCH v7 11/51] i386/xen: implement HYPERVISOR_sched_op, SCHEDOP_shutdown David Woodhouse
2023-01-16 21:57 ` [PATCH v7 12/51] i386/xen: Implement SCHEDOP_poll and SCHEDOP_yield David Woodhouse
2023-01-16 21:57 ` [PATCH v7 13/51] hw/xen: Add xen_overlay device for emulating shared xenheap pages David Woodhouse
2023-01-16 21:57 ` [PATCH v7 14/51] i386/xen: add pc_machine_kvm_type to initialize XEN_EMULATE mode David Woodhouse
2023-01-17 9:47 ` Paul Durrant
2023-01-16 21:57 ` [PATCH v7 15/51] i386/xen: manage and save/restore Xen guest long_mode setting David Woodhouse
2023-01-16 21:57 ` [PATCH v7 16/51] i386/xen: implement HYPERVISOR_memory_op David Woodhouse
2023-01-16 21:57 ` [PATCH v7 17/51] i386/xen: implement XENMEM_add_to_physmap_batch David Woodhouse
2023-01-16 21:57 ` [PATCH v7 18/51] i386/xen: implement HYPERVISOR_hvm_op David Woodhouse
2023-01-16 21:57 ` [PATCH v7 19/51] i386/xen: implement HYPERVISOR_vcpu_op David Woodhouse
2023-01-16 21:57 ` [PATCH v7 20/51] i386/xen: handle VCPUOP_register_vcpu_info David Woodhouse
2023-01-16 21:57 ` [PATCH v7 21/51] i386/xen: handle VCPUOP_register_vcpu_time_info David Woodhouse
2023-01-16 21:57 ` [PATCH v7 22/51] i386/xen: handle VCPUOP_register_runstate_memory_area David Woodhouse
2023-01-16 21:57 ` [PATCH v7 23/51] i386/xen: implement HYPERVISOR_event_channel_op David Woodhouse
2023-01-17 9:53 ` Paul Durrant
2023-01-17 9:59 ` David Woodhouse
2023-01-16 21:57 ` [PATCH v7 24/51] i386/xen: implement HVMOP_set_evtchn_upcall_vector David Woodhouse
2023-01-16 21:57 ` [PATCH v7 25/51] i386/xen: implement HVMOP_set_param David Woodhouse
2023-01-16 21:57 ` [PATCH v7 26/51] hw/xen: Add xen_evtchn device for event channel emulation David Woodhouse
2023-01-17 10:00 ` Paul Durrant
2023-01-17 10:23 ` David Woodhouse
2023-01-17 10:56 ` Paul Durrant
2023-01-17 11:02 ` David Woodhouse
2023-01-17 11:06 ` Paul Durrant
2023-01-17 11:24 ` David Woodhouse
2023-01-17 11:53 ` Paul Durrant
2023-01-17 12:12 ` David Woodhouse
2023-01-17 13:01 ` [PATCH v7.1 " David Woodhouse
2023-01-16 21:57 ` [PATCH v7 27/51] i386/xen: Add support for Xen event channel delivery to vCPU David Woodhouse
2023-01-17 11:11 ` Paul Durrant
2023-01-17 12:31 ` David Woodhouse
2023-01-17 13:11 ` Paul Durrant
2023-01-17 12:01 ` Paul Durrant
2023-01-16 21:57 ` [PATCH v7 28/51] hw/xen: Implement EVTCHNOP_status David Woodhouse
2023-01-16 21:57 ` [PATCH v7 29/51] hw/xen: Implement EVTCHNOP_close David Woodhouse
2023-01-16 21:57 ` [PATCH v7 30/51] hw/xen: Implement EVTCHNOP_unmask David Woodhouse
2023-01-16 21:57 ` [PATCH v7 31/51] hw/xen: Implement EVTCHNOP_bind_virq David Woodhouse
2023-01-16 21:57 ` [PATCH v7 32/51] hw/xen: Implement EVTCHNOP_bind_ipi David Woodhouse
2023-01-16 21:57 ` [PATCH v7 33/51] hw/xen: Implement EVTCHNOP_send David Woodhouse
2023-01-16 21:57 ` [PATCH v7 34/51] hw/xen: Implement EVTCHNOP_alloc_unbound David Woodhouse
2023-01-16 21:57 ` [PATCH v7 35/51] hw/xen: Implement EVTCHNOP_bind_interdomain David Woodhouse
2023-01-16 21:57 ` [PATCH v7 36/51] hw/xen: Implement EVTCHNOP_bind_vcpu David Woodhouse
2023-01-16 21:57 ` [PATCH v7 37/51] hw/xen: Implement EVTCHNOP_reset David Woodhouse
2023-01-16 21:57 ` [PATCH v7 38/51] i386/xen: add monitor commands to test event injection David Woodhouse
2023-01-17 10:08 ` Markus Armbruster
2023-01-17 10:41 ` David Woodhouse
2023-01-17 11:31 ` David Woodhouse
2023-01-19 11:01 ` David Woodhouse
2023-01-16 21:57 ` [PATCH v7 39/51] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_GSI callback David Woodhouse
2023-01-16 21:57 ` [PATCH v7 40/51] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_PCI_INTX callback David Woodhouse
2023-01-16 21:57 ` [PATCH v7 41/51] kvm/i386: Add xen-gnttab-max-frames property David Woodhouse
2023-01-16 21:57 ` [PATCH v7 42/51] hw/xen: Add xen_gnttab device for grant table emulation David Woodhouse
2023-01-16 21:57 ` [PATCH v7 43/51] hw/xen: Support mapping grant frames David Woodhouse
2023-01-16 21:57 ` [PATCH v7 44/51] i386/xen: Implement HYPERVISOR_grant_table_op and GNTTABOP_[gs]et_verson David Woodhouse
2023-01-16 21:57 ` [PATCH v7 45/51] hw/xen: Implement GNTTABOP_query_size David Woodhouse
2023-01-16 21:58 ` [PATCH v7 46/51] i386/xen: handle PV timer hypercalls David Woodhouse
2023-01-16 21:58 ` [PATCH v7 47/51] i386/xen: Reserve Xen special pages for console, xenstore rings David Woodhouse
2023-01-16 21:58 ` [PATCH v7 48/51] i386/xen: handle HVMOP_get_param David Woodhouse
2023-01-16 21:58 ` [PATCH v7 49/51] hw/xen: Add backend implementation of interdomain event channel support David Woodhouse
2023-01-16 21:58 ` [PATCH v7 50/51] hw/xen: Add xen_xenstore device for xenstore emulation David Woodhouse
2023-01-16 21:58 ` [PATCH v7 51/51] hw/xen: Add basic ring handling to xenstore David Woodhouse
2023-01-16 22:19 ` [RFC PATCH v7bis 00/19] Emulated Xen PV backend and PIRQ support David Woodhouse
2023-01-16 22:19 ` [RFC PATCH v7bis 01/19] hw/xen: Add evtchn operations to allow redirection to internal emulation David Woodhouse
2023-01-16 22:19 ` [RFC PATCH v7bis 02/19] hw/xen: Add emulated evtchn ops David Woodhouse
2023-01-16 22:19 ` [RFC PATCH v7bis 03/19] hw/xen: Add gnttab operations to allow redirection to internal emulation David Woodhouse
2023-01-16 22:19 ` [RFC PATCH v7bis 04/19] hw/xen: Pass grant ref to gnttab unmap David Woodhouse
2023-01-16 22:19 ` [RFC PATCH v7bis 05/19] hw/xen: Add foreignmem operations to allow redirection to internal emulation David Woodhouse
2023-01-16 22:19 ` [RFC PATCH v7bis 06/19] hw/xen: Add xenstore " David Woodhouse
2023-01-16 22:19 ` [RFC PATCH v7bis 07/19] hw/xen: Move xenstore_store_pv_console_info to xen_console.c David Woodhouse
2023-01-16 22:19 ` [RFC PATCH v7bis 08/19] hw/xen: Use XEN_PAGE_SIZE in PV backend drivers David Woodhouse
2023-01-16 22:19 ` [RFC PATCH v7bis 09/19] hw/xen: Rename xen_common.h to xen_native.h David Woodhouse
2023-01-16 22:19 ` [RFC PATCH v7bis 10/19] hw/xen: Build PV backend drivers for XENFV_MACHINE David Woodhouse
2023-01-16 22:19 ` [RFC PATCH v7bis 11/19] hw/xen: Map guest XENSTORE_PFN grant in emulated Xenstore David Woodhouse
2023-01-16 22:19 ` David Woodhouse [this message]
2023-01-16 22:19 ` [RFC PATCH v7bis 13/19] hw/xen: Implement soft reset for emulated gnttab David Woodhouse
2023-01-16 22:19 ` [RFC PATCH v7bis 14/19] hw/xen: Remove old version of Xen headers David Woodhouse
2023-01-16 22:19 ` [RFC PATCH v7bis 15/19] i386/xen: Initialize XenBus and legacy backends from pc_init1() David Woodhouse
2023-01-16 22:19 ` [RFC PATCH v7bis 16/19] i386/xen: Implement HYPERVISOR_physdev_op David Woodhouse
2023-01-16 22:19 ` [RFC PATCH v7bis 17/19] hw/xen: Implement emulated PIRQ hypercall support David Woodhouse
2023-01-16 22:19 ` [RFC PATCH v7bis 18/19] hw/xen: Support GSI mapping to PIRQ David Woodhouse
2023-01-16 22:19 ` [RFC PATCH v7bis 19/19] hw/xen: Support MSI " David Woodhouse
2023-01-17 16:01 ` [PATCH v7 52/51] hw/xen: Automatically add xen-platform PCI device for emulated Xen guests David Woodhouse
2023-01-17 16:02 ` [PATCH v7 53/51] i386/xen: Document Xen HVM emulation David Woodhouse
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230116221919.1124201-13-dwmw2@infradead.org \
--to=dwmw2@infradead.org \
--cc=alex.bennee@linaro.org \
--cc=ankur.a.arora@oracle.com \
--cc=armbru@redhat.com \
--cc=cfontana@suse.de \
--cc=dgilbert@redhat.com \
--cc=joao.m.martins@oracle.com \
--cc=julien@xen.org \
--cc=marcel.apfelbaum@gmail.com \
--cc=mst@redhat.com \
--cc=paul@xen.org \
--cc=pbonzini@redhat.com \
--cc=philmd@linaro.org \
--cc=qemu-devel@nongnu.org \
--cc=quintela@redhat.com \
--cc=thuth@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).