From: David Woodhouse <dwmw2@infradead.org>
To: Peter Maydell <peter.maydell@linaro.org>, qemu-devel@nongnu.org
Cc: "Paolo Bonzini" <pbonzini@redhat.com>,
	"Paul Durrant" <paul@xen.org>,
	"Joao Martins" <joao.m.martins@oracle.com>,
	"Ankur Arora" <ankur.a.arora@oracle.com>,
	"Philippe Mathieu-Daudé" <philmd@linaro.org>,
	"Thomas Huth" <thuth@redhat.com>,
	"Alex Bennée" <alex.bennee@linaro.org>,
	"Juan Quintela" <quintela@redhat.com>,
	"Dr . David Alan Gilbert" <dgilbert@redhat.com>,
	"Claudio Fontana" <cfontana@suse.de>,
	"Julien Grall" <julien@xen.org>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	"Marcel Apfelbaum" <marcel.apfelbaum@gmail.com>,
	armbru@redhat.com
Subject: [PATCH v9 55/58] hw/xen: Implement emulated PIRQ hypercall support
Date: Sat, 28 Jan 2023 08:11:10 +0000	[thread overview]
Message-ID: <20230128081113.1615111-56-dwmw2@infradead.org> (raw)
In-Reply-To: <20230128081113.1615111-1-dwmw2@infradead.org>

From: David Woodhouse <dwmw@amazon.co.uk>

This wires up the basic PIRQ infrastructure (the hypercalls and the PIRQ
allocation bookkeeping), but the actual interrupt delivery isn't there yet,
so don't advertise the feature to the guest.
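
For reference only (not part of this patch, and just a rough sketch: the
struct and constant names below come from the Xen public headers, while
'gsi' is a placeholder), the guest-side flow that these hypercalls serve
looks roughly like this:

    struct physdev_map_pirq map = {
        .domid = DOMID_SELF,
        .type  = MAP_PIRQ_TYPE_GSI,
        .index = gsi,       /* the GSI to map */
        .pirq  = -1,        /* ask for any free PIRQ */
    };
    HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map);

    struct evtchn_bind_pirq bind = {
        .pirq  = map.pirq,  /* the PIRQ chosen above */
        .flags = 0,         /* or BIND_PIRQ__WILL_SHARE */
    };
    HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind);
    /* bind.port is now the event channel that fires for this PIRQ */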

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
---
 hw/i386/kvm/trace-events  |   4 +
 hw/i386/kvm/trace.h       |   1 +
 hw/i386/kvm/xen_evtchn.c  | 300 +++++++++++++++++++++++++++++++++++++-
 hw/i386/kvm/xen_evtchn.h  |   2 +
 meson.build               |   1 +
 target/i386/kvm/xen-emu.c |  15 ++
 6 files changed, 318 insertions(+), 5 deletions(-)
 create mode 100644 hw/i386/kvm/trace-events
 create mode 100644 hw/i386/kvm/trace.h

diff --git a/hw/i386/kvm/trace-events b/hw/i386/kvm/trace-events
new file mode 100644
index 0000000000..04e60c5bb8
--- /dev/null
+++ b/hw/i386/kvm/trace-events
@@ -0,0 +1,4 @@
+kvm_xen_map_pirq(int pirq, int gsi) "pirq %d gsi %d"
+kvm_xen_unmap_pirq(int pirq, int gsi) "pirq %d gsi %d"
+kvm_xen_get_free_pirq(int pirq, int type) "pirq %d type %d"
+kvm_xen_bind_pirq(int pirq, int port) "pirq %d port %d"
diff --git a/hw/i386/kvm/trace.h b/hw/i386/kvm/trace.h
new file mode 100644
index 0000000000..e55d0812fd
--- /dev/null
+++ b/hw/i386/kvm/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-hw_i386_kvm.h"
diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c
index 8cff78ddcb..4ee275c371 100644
--- a/hw/i386/kvm/xen_evtchn.c
+++ b/hw/i386/kvm/xen_evtchn.c
@@ -23,6 +23,7 @@
 #include "exec/target_page.h"
 #include "exec/address-spaces.h"
 #include "migration/vmstate.h"
+#include "trace.h"
 
 #include "hw/sysbus.h"
 #include "hw/xen/xen.h"
@@ -104,6 +105,21 @@ struct xenevtchn_handle {
 #define PORT_INFO_TYPEVAL_REMOTE_QEMU           0x8000
 #define PORT_INFO_TYPEVAL_REMOTE_PORT_MASK      0x7FFF
 
+/*
+ * These 'emuirq' values are used by Xen in the live migration stream... and
+ * yes, I am insane enough to think about guest-transparent live migration
+ * from actual Xen to QEMU, and ensuring that we can convert/consume the stream.
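+ *
+ * Negative values like these get stored in the 'gsi' field of struct
+ * pirq_info below whenever a PIRQ isn't mapped to a real GSI#.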
+ */
+#define IRQ_UNBOUND -1
+#define IRQ_PT -2
+#define IRQ_MSI_EMU -3
+
+
+struct pirq_info {
+    int gsi;
+    uint16_t port;
+};
+
 struct XenEvtchnState {
     /*< private >*/
     SysBusDevice busdev;
@@ -121,8 +137,25 @@ struct XenEvtchnState {
     qemu_irq gsis[GSI_NUM_PINS];
 
     struct xenevtchn_handle *be_handles[EVTCHN_2L_NR_CHANNELS];
+
+    uint32_t nr_pirqs;
+
+    /* Bitmap of allocated PIRQs (serialized) */
+    uint16_t nr_pirq_inuse_words;
+    uint64_t *pirq_inuse_bitmap;
+
+    /* GSI → PIRQ mapping (serialized) */
+    uint16_t gsi_pirq[GSI_NUM_PINS];
+
+    /* Per-PIRQ information (rebuilt on migration) */
+    struct pirq_info *pirq;
 };
 
+#define pirq_inuse_word(s, pirq) (s->pirq_inuse_bitmap[((pirq) / 64)])
+#define pirq_inuse_bit(pirq) (1ULL << ((pirq) & 63))
+
+#define pirq_inuse(s, pirq) (pirq_inuse_word(s, pirq) & pirq_inuse_bit(pirq))
+
 struct XenEvtchnState *xen_evtchn_singleton;
 
 /* Top bits of callback_param are the type (HVM_PARAM_CALLBACK_TYPE_xxx) */
@@ -137,17 +170,45 @@ static int xen_evtchn_pre_load(void *opaque)
     /* Unbind all the backend-side ports; they need to rebind */
     unbind_backend_ports(s);
 
+    /* It'll be leaked otherwise. */
+    g_free(s->pirq_inuse_bitmap);
+    s->pirq_inuse_bitmap = NULL;
+
     return 0;
 }
 
 static int xen_evtchn_post_load(void *opaque, int version_id)
 {
     XenEvtchnState *s = opaque;
+    uint32_t i;
 
     if (s->callback_param) {
         xen_evtchn_set_callback_param(s->callback_param);
     }
 
+    /* Rebuild s->pirq[].port mapping */
+    for (i = 0; i < s->nr_ports; i++) {
+        XenEvtchnPort *p = &s->port_table[i];
+
+        if (p->type == EVTCHNSTAT_pirq) {
+            assert(p->type_val);
+            assert(p->type_val < s->nr_pirqs);
+
+            /*
+             * Set the gsi to IRQ_UNBOUND; it may be changed to an actual
+             * GSI# below, or to IRQ_MSI_EMU when the MSI table snooping
+             * catches up with it.
+             */
+            s->pirq[p->type_val].gsi = IRQ_UNBOUND;
+            s->pirq[p->type_val].port = i;
+        }
+    }
+    /* Rebuild s->pirq[].gsi mapping */
+    for (i = 0; i < GSI_NUM_PINS; i++) {
+        if (s->gsi_pirq[i]) {
+            s->pirq[s->gsi_pirq[i]].gsi = i;
+        }
+    }
     return 0;
 }
 
@@ -180,6 +241,10 @@ static const VMStateDescription xen_evtchn_vmstate = {
         VMSTATE_UINT32(nr_ports, XenEvtchnState),
         VMSTATE_STRUCT_VARRAY_UINT32(port_table, XenEvtchnState, nr_ports, 1,
                                      xen_evtchn_port_vmstate, XenEvtchnPort),
+        VMSTATE_UINT16_ARRAY(gsi_pirq, XenEvtchnState, GSI_NUM_PINS),
+        VMSTATE_VARRAY_UINT16_ALLOC(pirq_inuse_bitmap, XenEvtchnState,
+                                    nr_pirq_inuse_words, 0,
+                                    vmstate_info_uint64, uint64_t),
         VMSTATE_END_OF_LIST()
     }
 };
@@ -220,6 +285,23 @@ void xen_evtchn_create(void)
     for (i = 0; i < GSI_NUM_PINS; i++) {
         sysbus_init_irq(SYS_BUS_DEVICE(s), &s->gsis[i]);
     }
+
+    /*
+     * We could parameterise the number of PIRQs available if needed,
+     * but for now limit it to 256. The Xen scheme for encoding PIRQ#
+     * into an MSI message is not compatible with 32-bit MSI, as it
+     * puts the high bits of the PIRQ# into the high bits of the MSI
+     * message address, instead of using the Extended Destination ID
+     * in address bits 4-11 which perhaps would have been a better
+     * choice. So to keep life simple, just stick with 256 as the
+     * default, which conveniently doesn't need to set anything
+     * outside the low 32 bits of the address.
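+     *
+     * (Roughly, going by that scheme: PIRQ 0x12 would arrive as an MSI
+     * with address 0xfee12000, while any PIRQ above 255 would need
+     * address bits above bit 31 to be set.)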
+     */
+    s->nr_pirqs = 256;
+
+    s->nr_pirq_inuse_words = DIV_ROUND_UP(s->nr_pirqs, 64);
+    s->pirq_inuse_bitmap = g_new0(uint64_t, s->nr_pirq_inuse_words);
+    s->pirq = g_new0(struct pirq_info, s->nr_pirqs);
 }
 
 void xen_evtchn_connect_gsis(qemu_irq *system_gsis)
@@ -926,6 +1008,10 @@ static int close_port(XenEvtchnState *s, evtchn_port_t port)
     case EVTCHNSTAT_closed:
         return -ENOENT;
 
+    case EVTCHNSTAT_pirq:
+        s->pirq[p->type_val].port = 0;
+        break;
+
     case EVTCHNSTAT_virq:
         kvm_xen_set_vcpu_virq(virq_is_global(p->type_val) ? 0 : p->vcpu,
                               p->type_val, 0);
@@ -1120,6 +1206,37 @@ int xen_evtchn_bind_virq_op(struct evtchn_bind_virq *virq)
     return ret;
 }
 
+int xen_evtchn_bind_pirq_op(struct evtchn_bind_pirq *pirq)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    int ret;
+
+    if (!s) {
+        return -ENOTSUP;
+    }
+
+    if (pirq->pirq >= s->nr_pirqs) {
+        return -EINVAL;
+    }
+
+    QEMU_LOCK_GUARD(&s->port_lock);
+
+    if (s->pirq[pirq->pirq].port) {
+        return -EBUSY;
+    }
+
+    ret = allocate_port(s, 0, EVTCHNSTAT_pirq, pirq->pirq,
+                        &pirq->port);
+    if (ret) {
+        return ret;
+    }
+
+    s->pirq[pirq->pirq].port = pirq->port;
+    trace_kvm_xen_bind_pirq(pirq->pirq, pirq->port);
+
+    return ret;
+}
+
 int xen_evtchn_bind_ipi_op(struct evtchn_bind_ipi *ipi)
 {
     XenEvtchnState *s = xen_evtchn_singleton;
@@ -1346,29 +1463,202 @@ int xen_evtchn_set_port(uint16_t port)
     return ret;
 }
 
+static int allocate_pirq(XenEvtchnState *s, int type, int gsi)
+{
+    uint16_t pirq;
+
+    /*
+     * Preserve the allocation strategy that Xen has. It looks like
+     * we *never* give out PIRQ 0-15, we give out 16-nr_irqs_gsi only
+     * to GSIs (counting up from 16), and then we count backwards from
+     * the top for MSIs or when the GSI space is exhausted.
+     */
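+    /*
+     * (With the default nr_pirqs of 256 that means the first GSI mapping
+     * gets PIRQ 16, and the first MSI/free allocation counts down from 255.)
+     */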
+    if (type == MAP_PIRQ_TYPE_GSI) {
+        for (pirq = 16 ; pirq < GSI_NUM_PINS; pirq++) {
+            if (pirq_inuse(s, pirq)) {
+                continue;
+            }
+
+            /* Found it */
+            goto found;
+        }
+    }
+    for (pirq = s->nr_pirqs - 1; pirq >= GSI_NUM_PINS; pirq--) {
+        /* Skip whole words at a time when they're full */
+        if (pirq_inuse_word(s, pirq) == UINT64_MAX) {
+            pirq &= ~63ULL;
+            continue;
+        }
+        if (pirq_inuse(s, pirq)) {
+            continue;
+        }
+
+        goto found;
+    }
+    return -ENOSPC;
+
+ found:
+    pirq_inuse_word(s, pirq) |= pirq_inuse_bit(pirq);
+    if (gsi >= 0) {
+        assert(gsi < GSI_NUM_PINS);
+        s->gsi_pirq[gsi] = pirq;
+    }
+    s->pirq[pirq].gsi = gsi;
+    return pirq;
+}
+
 int xen_physdev_map_pirq(struct physdev_map_pirq *map)
 {
-    return -ENOTSUP;
+    XenEvtchnState *s = xen_evtchn_singleton;
+    int pirq = map->pirq;
+    int gsi = map->index;
+
+    if (!s) {
+        return -ENOTSUP;
+    }
+
+    QEMU_LOCK_GUARD(&s->port_lock);
+
+    if (map->domid != DOMID_SELF && map->domid != xen_domid) {
+        return -EPERM;
+    }
+    if (map->type != MAP_PIRQ_TYPE_GSI) {
+        return -EINVAL;
+    }
+    if (gsi < 0 || gsi >= GSI_NUM_PINS) {
+        return -EINVAL;
+    }
+
+    if (pirq < 0) {
+        pirq = allocate_pirq(s, map->type, gsi);
+        if (pirq < 0) {
+            return pirq;
+        }
+        map->pirq = pirq;
+    } else if (pirq >= s->nr_pirqs) {
+        return -EINVAL;
+    } else {
+        /*
+         * User specified a valid-looking PIRQ#. Allow it if it is
+         * allocated and not yet bound, or if it is unallocated.
+         */
+        if (pirq_inuse(s, pirq)) {
+            if (s->pirq[pirq].gsi != IRQ_UNBOUND) {
+                return -EBUSY;
+            }
+        } else {
+            /* If it was unused, mark it used now. */
+            pirq_inuse_word(s, pirq) |= pirq_inuse_bit(pirq);
+        }
+        /* Set the mapping in both directions. */
+        s->pirq[pirq].gsi = gsi;
+        s->gsi_pirq[gsi] = pirq;
+    }
+
+    trace_kvm_xen_map_pirq(pirq, gsi);
+    return 0;
 }
 
 int xen_physdev_unmap_pirq(struct physdev_unmap_pirq *unmap)
 {
-    return -ENOTSUP;
+    XenEvtchnState *s = xen_evtchn_singleton;
+    int pirq = unmap->pirq;
+    int gsi;
+
+    if (!s) {
+        return -ENOTSUP;
+    }
+
+    if (unmap->domid != DOMID_SELF && unmap->domid != xen_domid) {
+        return -EPERM;
+    }
+    if (pirq < 0 || pirq >= s->nr_pirqs) {
+        return -EINVAL;
+    }
+
+    QEMU_LOCK_GUARD(&s->port_lock);
+
+    if (!pirq_inuse(s, pirq)) {
+        return -ENOENT;
+    }
+
+    gsi = s->pirq[pirq].gsi;
+
+    /* We can only unmap GSI PIRQs */
+    if (gsi < 0) {
+        return -EINVAL;
+    }
+
+    s->gsi_pirq[gsi] = 0;
+    s->pirq[pirq].gsi = IRQ_UNBOUND; /* Doesn't actually matter because: */
+    pirq_inuse_word(s, pirq) &= ~pirq_inuse_bit(pirq);
+
+    trace_kvm_xen_unmap_pirq(pirq, gsi);
+    return 0;
 }
 
 int xen_physdev_eoi_pirq(struct physdev_eoi *eoi)
 {
-    return -ENOTSUP;
+    XenEvtchnState *s = xen_evtchn_singleton;
+    int pirq = eoi->irq;
+    int gsi;
+
+    if (!s) {
+        return -ENOTSUP;
+    }
+
+    QEMU_LOCK_GUARD(&s->port_lock);
+
+    if (!pirq_inuse(s, pirq)) {
+        return -ENOENT;
+    }
+
+    gsi = s->pirq[pirq].gsi;
+    if (gsi < 0) {
+        return -EINVAL;
+    }
+
+    /* XXX: Reassert a level IRQ if needed */
+    return 0;
 }
 
 int xen_physdev_query_pirq(struct physdev_irq_status_query *query)
 {
-    return -ENOTSUP;
+    XenEvtchnState *s = xen_evtchn_singleton;
+    int pirq = query->irq;
+
+    if (!s) {
+        return -ENOTSUP;
+    }
+
+    QEMU_LOCK_GUARD(&s->port_lock);
+
+    if (!pirq_inuse(s, pirq)) {
+        return -ENOENT;
+    }
+
+    if (s->pirq[pirq].gsi >= 0) {
+        query->flags = XENIRQSTAT_needs_eoi;
+    } else {
+        query->flags = 0;
+    }
+
+    return 0;
 }
 
 int xen_physdev_get_free_pirq(struct physdev_get_free_pirq *get)
 {
-    return -ENOTSUP;
+    XenEvtchnState *s = xen_evtchn_singleton;
+    int pirq;
+
+    if (!s) {
+        return -ENOTSUP;
+    }
+
+    QEMU_LOCK_GUARD(&s->port_lock);
+
+    pirq = allocate_pirq(s, get->type, IRQ_UNBOUND);
+    if (pirq < 0) {
+        return pirq;
+    }
+
+    get->pirq = pirq;
+    trace_kvm_xen_get_free_pirq(pirq, get->type);
+    return 0;
 }
 
 struct xenevtchn_handle *xen_be_evtchn_open(void)
diff --git a/hw/i386/kvm/xen_evtchn.h b/hw/i386/kvm/xen_evtchn.h
index 352c875976..a7383f760c 100644
--- a/hw/i386/kvm/xen_evtchn.h
+++ b/hw/i386/kvm/xen_evtchn.h
@@ -45,6 +45,7 @@ struct evtchn_status;
 struct evtchn_close;
 struct evtchn_unmask;
 struct evtchn_bind_virq;
+struct evtchn_bind_pirq;
 struct evtchn_bind_ipi;
 struct evtchn_send;
 struct evtchn_alloc_unbound;
@@ -55,6 +56,7 @@ int xen_evtchn_status_op(struct evtchn_status *status);
 int xen_evtchn_close_op(struct evtchn_close *close);
 int xen_evtchn_unmask_op(struct evtchn_unmask *unmask);
 int xen_evtchn_bind_virq_op(struct evtchn_bind_virq *virq);
+int xen_evtchn_bind_pirq_op(struct evtchn_bind_pirq *pirq);
 int xen_evtchn_bind_ipi_op(struct evtchn_bind_ipi *ipi);
 int xen_evtchn_send_op(struct evtchn_send *send);
 int xen_evtchn_alloc_unbound_op(struct evtchn_alloc_unbound *alloc);
diff --git a/meson.build b/meson.build
index 1d8aa323e7..10360d2ec2 100644
--- a/meson.build
+++ b/meson.build
@@ -2941,6 +2941,7 @@ if have_system
     'hw/i2c',
     'hw/i386',
     'hw/i386/xen',
+    'hw/i386/kvm',
     'hw/ide',
     'hw/input',
     'hw/intc',
diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c
index 54cdb75bad..e6bd564c20 100644
--- a/target/i386/kvm/xen-emu.c
+++ b/target/i386/kvm/xen-emu.c
@@ -1245,6 +1245,21 @@ static bool kvm_xen_hcall_evtchn_op(struct kvm_xen_exit *exit, X86CPU *cpu,
         }
         break;
     }
+    case EVTCHNOP_bind_pirq: {
+        struct evtchn_bind_pirq pirq;
+
+        qemu_build_assert(sizeof(pirq) == 12);
+        if (kvm_copy_from_gva(cs, arg, &pirq, sizeof(pirq))) {
+            err = -EFAULT;
+            break;
+        }
+
+        err = xen_evtchn_bind_pirq_op(&pirq);
+        if (!err && kvm_copy_to_gva(cs, arg, &pirq, sizeof(pirq))) {
+            err = -EFAULT;
+        }
+        break;
+    }
     case EVTCHNOP_bind_ipi: {
         struct evtchn_bind_ipi ipi;
 
-- 
2.39.0




Thread overview: 61+ messages
2023-01-28  8:10 [PATCH v9 00/58] Xen HVM support under KVM David Woodhouse
2023-01-28  8:10 ` [PATCH v9 01/58] include: import Xen public headers to include/standard-headers/ David Woodhouse
2023-01-30  8:41   ` Thomas Huth
2023-01-31  8:26     ` David Woodhouse
2023-01-28  8:10 ` [PATCH v9 02/58] xen: add CONFIG_XEN_BUS and CONFIG_XEN_EMU options for Xen emulation David Woodhouse
2023-01-28  8:10 ` [PATCH v9 03/58] xen: Add XEN_DISABLED mode and make it default David Woodhouse
2023-01-28  8:10 ` [PATCH v9 04/58] i386/kvm: Add xen-version KVM accelerator property and init KVM Xen support David Woodhouse
2023-01-28  8:10 ` [PATCH v9 05/58] i386/kvm: handle Xen HVM cpuid leaves David Woodhouse
2023-01-28  8:10 ` [PATCH v9 06/58] i386/hvm: Set Xen vCPU ID in KVM David Woodhouse
2023-01-28  8:10 ` [PATCH v9 07/58] xen-platform: exclude vfio-pci from the PCI platform unplug David Woodhouse
2023-01-28  8:10 ` [PATCH v9 08/58] xen-platform: allow its creation with XEN_EMULATE mode David Woodhouse
2023-01-28  8:10 ` [PATCH v9 09/58] i386/xen: handle guest hypercalls David Woodhouse
2023-01-28  8:10 ` [PATCH v9 10/58] i386/xen: implement HYPERVISOR_xen_version David Woodhouse
2023-01-28  8:10 ` [PATCH v9 11/58] i386/xen: implement HYPERVISOR_sched_op, SCHEDOP_shutdown David Woodhouse
2023-01-28  8:10 ` [PATCH v9 12/58] i386/xen: Implement SCHEDOP_poll and SCHEDOP_yield David Woodhouse
2023-01-28  8:10 ` [PATCH v9 13/58] hw/xen: Add xen_overlay device for emulating shared xenheap pages David Woodhouse
2023-01-28  8:10 ` [PATCH v9 14/58] i386/xen: add pc_machine_kvm_type to initialize XEN_EMULATE mode David Woodhouse
2023-01-28  8:10 ` [PATCH v9 15/58] i386/xen: manage and save/restore Xen guest long_mode setting David Woodhouse
2023-01-28  8:10 ` [PATCH v9 16/58] i386/xen: implement HYPERVISOR_memory_op David Woodhouse
2023-01-28  8:10 ` [PATCH v9 17/58] i386/xen: implement XENMEM_add_to_physmap_batch David Woodhouse
2023-01-28  8:10 ` [PATCH v9 18/58] i386/xen: implement HYPERVISOR_hvm_op David Woodhouse
2023-01-28  8:10 ` [PATCH v9 19/58] i386/xen: implement HYPERVISOR_vcpu_op David Woodhouse
2023-01-28  8:10 ` [PATCH v9 20/58] i386/xen: handle VCPUOP_register_vcpu_info David Woodhouse
2023-01-28  8:10 ` [PATCH v9 21/58] i386/xen: handle VCPUOP_register_vcpu_time_info David Woodhouse
2023-01-28  8:10 ` [PATCH v9 22/58] i386/xen: handle VCPUOP_register_runstate_memory_area David Woodhouse
2023-01-28  8:10 ` [PATCH v9 23/58] i386/xen: implement HYPERVISOR_event_channel_op David Woodhouse
2023-01-28  8:10 ` [PATCH v9 24/58] i386/xen: implement HVMOP_set_evtchn_upcall_vector David Woodhouse
2023-01-28  8:10 ` [PATCH v9 25/58] i386/xen: implement HVMOP_set_param David Woodhouse
2023-01-28  8:10 ` [PATCH v9 26/58] hw/xen: Add xen_evtchn device for event channel emulation David Woodhouse
2023-01-28  8:10 ` [PATCH v9 27/58] i386/xen: Add support for Xen event channel delivery to vCPU David Woodhouse
2023-01-28  8:10 ` [PATCH v9 28/58] hw/xen: Implement EVTCHNOP_status David Woodhouse
2023-01-28  8:10 ` [PATCH v9 29/58] hw/xen: Implement EVTCHNOP_close David Woodhouse
2023-01-28  8:10 ` [PATCH v9 30/58] hw/xen: Implement EVTCHNOP_unmask David Woodhouse
2023-01-28  8:10 ` [PATCH v9 31/58] hw/xen: Implement EVTCHNOP_bind_virq David Woodhouse
2023-01-28  8:10 ` [PATCH v9 32/58] hw/xen: Implement EVTCHNOP_bind_ipi David Woodhouse
2023-01-28  8:10 ` [PATCH v9 33/58] hw/xen: Implement EVTCHNOP_send David Woodhouse
2023-01-28  8:10 ` [PATCH v9 34/58] hw/xen: Implement EVTCHNOP_alloc_unbound David Woodhouse
2023-01-28  8:10 ` [PATCH v9 35/58] hw/xen: Implement EVTCHNOP_bind_interdomain David Woodhouse
2023-01-28  8:10 ` [PATCH v9 36/58] hw/xen: Implement EVTCHNOP_bind_vcpu David Woodhouse
2023-01-28  8:10 ` [PATCH v9 37/58] hw/xen: Implement EVTCHNOP_reset David Woodhouse
2023-01-28  8:10 ` [PATCH v9 38/58] i386/xen: add monitor commands to test event injection David Woodhouse
2023-01-28  8:10 ` [PATCH v9 39/58] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_GSI callback David Woodhouse
2023-01-28  8:10 ` [PATCH v9 40/58] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_PCI_INTX callback David Woodhouse
2023-01-28  8:10 ` [PATCH v9 41/58] kvm/i386: Add xen-gnttab-max-frames property David Woodhouse
2023-01-28  8:10 ` [PATCH v9 42/58] hw/xen: Add xen_gnttab device for grant table emulation David Woodhouse
2023-01-28  8:10 ` [PATCH v9 43/58] hw/xen: Support mapping grant frames David Woodhouse
2023-01-28  8:10 ` [PATCH v9 44/58] i386/xen: Implement HYPERVISOR_grant_table_op and GNTTABOP_[gs]et_verson David Woodhouse
2023-01-28  8:11 ` [PATCH v9 45/58] hw/xen: Implement GNTTABOP_query_size David Woodhouse
2023-01-28  8:11 ` [PATCH v9 46/58] i386/xen: handle PV timer hypercalls David Woodhouse
2023-01-28  8:11 ` [PATCH v9 47/58] i386/xen: Reserve Xen special pages for console, xenstore rings David Woodhouse
2023-01-28  8:11 ` [PATCH v9 48/58] i386/xen: handle HVMOP_get_param David Woodhouse
2023-01-28  8:11 ` [PATCH v9 49/58] hw/xen: Add backend implementation of interdomain event channel support David Woodhouse
2023-01-28  8:11 ` [PATCH v9 50/58] hw/xen: Add xen_xenstore device for xenstore emulation David Woodhouse
2023-01-28  8:11 ` [PATCH v9 51/58] hw/xen: Add basic ring handling to xenstore David Woodhouse
2023-01-28  8:11 ` [PATCH v9 52/58] hw/xen: Automatically add xen-platform PCI device for emulated Xen guests David Woodhouse
2023-01-28  8:11 ` [PATCH v9 53/58] i386/xen: Document Xen HVM emulation David Woodhouse
2023-01-28  8:11 ` [PATCH v9 54/58] i386/xen: Implement HYPERVISOR_physdev_op David Woodhouse
2023-01-28  8:11 ` [PATCH v9 55/58] hw/xen: Implement emulated PIRQ hypercall support David Woodhouse [this message]
2023-01-28  8:11 ` [PATCH v9 56/58] hw/xen: Support GSI mapping to PIRQ David Woodhouse
2023-01-28  8:11 ` [PATCH v9 57/58] hw/xen: Support MSI " David Woodhouse
2023-01-28  8:11 ` [PATCH v9 58/58] kvm/i386: Add xen-evtchn-max-pirq property David Woodhouse
