From: David Woodhouse <dwmw2@infradead.org>
To: Peter Maydell <peter.maydell@linaro.org>, qemu-devel@nongnu.org
Cc: "Paolo Bonzini" <pbonzini@redhat.com>,
	"Paul Durrant" <paul@xen.org>,
	"Joao Martins" <joao.m.martins@oracle.com>,
	"Ankur Arora" <ankur.a.arora@oracle.com>,
	"Philippe Mathieu-Daudé" <philmd@linaro.org>,
	"Thomas Huth" <thuth@redhat.com>,
	"Alex Bennée" <alex.bennee@linaro.org>,
	"Juan Quintela" <quintela@redhat.com>,
	"Dr . David Alan Gilbert" <dgilbert@redhat.com>,
	"Claudio Fontana" <cfontana@suse.de>,
	"Julien Grall" <julien@xen.org>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	"Marcel Apfelbaum" <marcel.apfelbaum@gmail.com>,
	armbru@redhat.com
Subject: [PATCH v9 38/58] i386/xen: add monitor commands to test event injection
Date: Sat, 28 Jan 2023 08:10:53 +0000
Message-ID: <20230128081113.1615111-39-dwmw2@infradead.org>
In-Reply-To: <20230128081113.1615111-1-dwmw2@infradead.org>

From: Joao Martins <joao.m.martins@oracle.com>

Specifically, add listing and injection of event channels.

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Acked-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
---
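An illustrative monitor session (notes only, not part of the commit message;
the port numbers, types and bindings shown here are invented, the output
format simply follows hmp_xen_event_list()/hmp_xen_event_inject() below):

  (qemu) xen-event-list
  port    1: vcpu: 0 virq(0) PENDING
  port    2: vcpu: 0 interdomain(qemu:1)
  (qemu) xen-event-inject 2
  Delivered port 2
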
 hmp-commands.hx          |  29 +++++++++
 hw/i386/kvm/xen_evtchn.c | 138 +++++++++++++++++++++++++++++++++++++++
 include/monitor/hmp.h    |   2 +
 qapi/misc-target.json    | 116 +++++++++++++++++++++++++++++++++
 4 files changed, 285 insertions(+)

diff --git a/hmp-commands.hx b/hmp-commands.hx
index 673e39a697..fd77c432c0 100644
--- a/hmp-commands.hx
+++ b/hmp-commands.hx
@@ -1815,3 +1815,32 @@ SRST
   Dump the FDT in dtb format to *filename*.
 ERST
 #endif
+
+#if defined(CONFIG_XEN_EMU)
+    {
+        .name       = "xen-event-inject",
+        .args_type  = "port:i",
+        .params     = "port",
+        .help       = "inject event channel",
+        .cmd        = hmp_xen_event_inject,
+    },
+
+SRST
+``xen-event-inject`` *port*
+  Notify guest via event channel on port *port*.
+ERST
+
+
+    {
+        .name       = "xen-event-list",
+        .args_type  = "",
+        .params     = "",
+        .help       = "list event channel state",
+        .cmd        = hmp_xen_event_list,
+    },
+
+SRST
+``xen-event-list``
+  List event channels in the guest.
+ERST
+#endif
diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c
index bf4e3c61a8..5fdb301f54 100644
--- a/hw/i386/kvm/xen_evtchn.c
+++ b/hw/i386/kvm/xen_evtchn.c
@@ -14,7 +14,11 @@
 #include "qemu/module.h"
 #include "qemu/main-loop.h"
 #include "qemu/log.h"
+#include "monitor/monitor.h"
+#include "monitor/hmp.h"
 #include "qapi/error.h"
+#include "qapi/qapi-commands-misc-target.h"
+#include "qapi/qmp/qdict.h"
 #include "qom/object.h"
 #include "exec/target_page.h"
 #include "exec/address-spaces.h"
@@ -1066,3 +1070,137 @@ int xen_evtchn_send_op(struct evtchn_send *send)
     return ret;
 }
 
+EvtchnInfoList *qmp_xen_event_list(Error **errp)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    EvtchnInfoList *head = NULL, **tail = &head;
+    void *shinfo, *pending, *mask;
+    int i;
+
+    if (!s) {
+        error_setg(errp, "Xen event channel emulation not enabled");
+        return NULL;
+    }
+
+    shinfo = xen_overlay_get_shinfo_ptr();
+    if (!shinfo) {
+        error_setg(errp, "Xen shared info page not allocated");
+        return NULL;
+    }
+
+    if (xen_is_long_mode()) {
+        pending = shinfo + offsetof(struct shared_info, evtchn_pending);
+        mask = shinfo + offsetof(struct shared_info, evtchn_mask);
+    } else {
+        pending = shinfo + offsetof(struct compat_shared_info, evtchn_pending);
+        mask = shinfo + offsetof(struct compat_shared_info, evtchn_mask);
+    }
+
+    QEMU_LOCK_GUARD(&s->port_lock);
+
+    for (i = 0; i < s->nr_ports; i++) {
+        XenEvtchnPort *p = &s->port_table[i];
+        EvtchnInfo *info;
+
+        if (p->type == EVTCHNSTAT_closed) {
+            continue;
+        }
+
+        info = g_new0(EvtchnInfo, 1);
+
+        info->port = i;
+        qemu_build_assert(EVTCHN_PORT_TYPE_CLOSED == EVTCHNSTAT_closed);
+        qemu_build_assert(EVTCHN_PORT_TYPE_UNBOUND == EVTCHNSTAT_unbound);
+        qemu_build_assert(EVTCHN_PORT_TYPE_INTERDOMAIN == EVTCHNSTAT_interdomain);
+        qemu_build_assert(EVTCHN_PORT_TYPE_PIRQ == EVTCHNSTAT_pirq);
+        qemu_build_assert(EVTCHN_PORT_TYPE_VIRQ == EVTCHNSTAT_virq);
+        qemu_build_assert(EVTCHN_PORT_TYPE_IPI == EVTCHNSTAT_ipi);
+
+        info->type = p->type;
+        if (p->type == EVTCHNSTAT_interdomain) {
+            info->remote_domain = g_strdup((p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU) ?
+                                           "qemu" : "loopback");
+            info->target = p->type_val & PORT_INFO_TYPEVAL_REMOTE_PORT_MASK;
+        } else {
+            info->target = p->type_val;
+        }
+        info->vcpu = p->vcpu;
+        info->pending = test_bit(i, pending);
+        info->masked = test_bit(i, mask);
+
+        QAPI_LIST_APPEND(tail, info);
+    }
+
+    return head;
+}
+
+void qmp_xen_event_inject(uint32_t port, Error **errp)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+
+    if (!s) {
+        error_setg(errp, "Xen event channel emulation not enabled");
+        return;
+    }
+
+    if (!valid_port(port)) {
+        error_setg(errp, "Invalid port %u", port);
+        return;
+    }
+
+    QEMU_LOCK_GUARD(&s->port_lock);
+
+    if (set_port_pending(s, port)) {
+        error_setg(errp, "Failed to set port %u", port);
+        return;
+    }
+}
+
+void hmp_xen_event_list(Monitor *mon, const QDict *qdict)
+{
+    EvtchnInfoList *iter, *info_list;
+    Error *err = NULL;
+
+    info_list = qmp_xen_event_list(&err);
+    if (err) {
+        hmp_handle_error(mon, err);
+        return;
+    }
+
+    for (iter = info_list; iter; iter = iter->next) {
+        EvtchnInfo *info = iter->value;
+
+        monitor_printf(mon, "port %4lu: vcpu: %ld %s", info->port, info->vcpu,
+                       EvtchnPortType_str(info->type));
+        if (info->type != EVTCHN_PORT_TYPE_IPI) {
+            monitor_printf(mon,  "(");
+            if (info->remote_domain) {
+                monitor_printf(mon, "%s:", info->remote_domain);
+            }
+            monitor_printf(mon, "%ld)", info->target);
+        }
+        if (info->pending) {
+            monitor_printf(mon, " PENDING");
+        }
+        if (info->masked) {
+            monitor_printf(mon, " MASKED");
+        }
+        monitor_printf(mon, "\n");
+    }
+
+    qapi_free_EvtchnInfoList(info_list);
+}
+
+void hmp_xen_event_inject(Monitor *mon, const QDict *qdict)
+{
+    int port = qdict_get_int(qdict, "port");
+    Error *err = NULL;
+
+    qmp_xen_event_inject(port, &err);
+    if (err) {
+        hmp_handle_error(mon, err);
+    } else {
+        monitor_printf(mon, "Delivered port %d\n", port);
+    }
+}
+
diff --git a/include/monitor/hmp.h b/include/monitor/hmp.h
index 1b3bdcb446..cbc6e9bb91 100644
--- a/include/monitor/hmp.h
+++ b/include/monitor/hmp.h
@@ -108,6 +108,8 @@ void hmp_virtio_status(Monitor *mon, const QDict *qdict);
 void hmp_virtio_queue_status(Monitor *mon, const QDict *qdict);
 void hmp_vhost_queue_status(Monitor *mon, const QDict *qdict);
 void hmp_virtio_queue_element(Monitor *mon, const QDict *qdict);
+void hmp_xen_event_inject(Monitor *mon, const QDict *qdict);
+void hmp_xen_event_list(Monitor *mon, const QDict *qdict);
 void object_add_completion(ReadLineState *rs, int nb_args, const char *str);
 void object_del_completion(ReadLineState *rs, int nb_args, const char *str);
 void device_add_completion(ReadLineState *rs, int nb_args, const char *str);
diff --git a/qapi/misc-target.json b/qapi/misc-target.json
index 5b6a8e9185..652e6e0b37 100644
--- a/qapi/misc-target.json
+++ b/qapi/misc-target.json
@@ -380,3 +380,119 @@
 #
 ##
 { 'command': 'query-sgx-capabilities', 'returns': 'SGXInfo', 'if': 'TARGET_I386' }
+
+
+##
+# @EvtchnPortType:
+#
+# An enumeration of Xen event channel port types.
+#
+# @closed: The port is unused.
+#
+# @unbound: The port is allocated and ready to be bound.
+#
+# @interdomain: The port is connected as an interdomain interrupt.
+#
+# @pirq: The port is bound to a physical IRQ (PIRQ).
+#
+# @virq: The port is bound to a virtual IRQ (VIRQ).
+#
+# @ipi: The port is an inter-processor interrupt (IPI).
+#
+# Since: 8.0.0
+##
+{ 'enum': 'EvtchnPortType',
+  'data': ['closed', 'unbound', 'interdomain', 'pirq', 'virq', 'ipi'],
+  'if': 'TARGET_I386' }
+
+##
+# @EvtchnInfo:
+#
+# Information about a Xen event channel port
+#
+# @port: the port number
+#
+# @vcpu: target vCPU for this port
+#
+# @type: the port type
+#
+# @remote-domain: remote domain for interdomain ports
+#
+# @target: remote port ID, or virq/pirq number
+#
+# @pending: port is currently active, pending delivery
+#
+# @masked: port is masked
+#
+# Since: 8.0.0
+##
+{ 'struct': 'EvtchnInfo',
+  'data': {'port': 'int',
+           'vcpu': 'int',
+           'type': 'EvtchnPortType',
+           'remote-domain': 'str',
+           'target': 'int',
+           'pending': 'bool',
+           'masked': 'bool'},
+  'if': 'TARGET_I386' }
+
+
+##
+# @xen-event-list:
+#
+# Query the Xen event channels opened by the guest.
+#
+# Returns: list of open event channel ports.
+#
+# Since: 8.0.0
+#
+# Example:
+#
+# -> { "execute": "xen-event-list" }
+# <- { "return": [
+#         {
+#             "pending": false,
+#             "port": 1,
+#             "vcpu": 1,
+#             "remote-domain": "qemu",
+#             "masked": false,
+#             "type": "interdomain",
+#             "target": 1
+#         },
+#         {
+#             "pending": false,
+#             "port": 2,
+#             "vcpu": 0,
+#             "remote-domain": "",
+#             "masked": false,
+#             "type": "virq",
+#             "target": 0
+#         }
+#      ]
+#    }
+#
+##
+{ 'command': 'xen-event-list',
+  'returns': ['EvtchnInfo'],
+  'if': 'TARGET_I386' }
+
+##
+# @xen-event-inject:
+#
+# Inject a Xen event channel port (interrupt) to the guest.
+#
+# @port: The port number
+#
+# Returns: Nothing on success.
+#
+# Since: 8.0.0
+#
+# Example:
+#
+# -> { "execute": "xen-event-inject", "arguments": { "port": 1 } }
+# <- { "return": { } }
+#
+##
+{ 'command': 'xen-event-inject',
+  'data': { 'port': 'uint32' },
+  'if': 'TARGET_I386' }
-- 
2.39.0