From: David Woodhouse <dwmw2@infradead.org>
To: qemu-devel@nongnu.org
Cc: "Paolo Bonzini" <pbonzini@redhat.com>,
"Paul Durrant" <paul@xen.org>,
"Joao Martins" <joao.m.martins@oracle.com>,
"Ankur Arora" <ankur.a.arora@oracle.com>,
"Philippe Mathieu-Daudé" <philmd@linaro.org>,
"Thomas Huth" <thuth@redhat.com>,
"Alex Bennée" <alex.bennee@linaro.org>,
"Juan Quintela" <quintela@redhat.com>,
"Dr . David Alan Gilbert" <dgilbert@redhat.com>,
"Claudio Fontana" <cfontana@suse.de>
Subject: [RFC PATCH v3 30/38] hw/xen: Implement EVTCHNOP_send
Date: Fri, 16 Dec 2022 00:41:09 +0000 [thread overview]
Message-ID: <20221216004117.862106-31-dwmw2@infradead.org> (raw)
In-Reply-To: <20221216004117.862106-1-dwmw2@infradead.org>
From: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
---
hw/i386/kvm/xen_evtchn.c | 162 ++++++++++++++++++++++++++++++++++++++
hw/i386/kvm/xen_evtchn.h | 2 +
target/i386/kvm/xen-emu.c | 12 +++
3 files changed, 176 insertions(+)
diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c
index 2e35812b32..d90a92a25a 100644
--- a/hw/i386/kvm/xen_evtchn.c
+++ b/hw/i386/kvm/xen_evtchn.c
@@ -441,6 +441,117 @@ static int unmask_port(XenEvtchnState *s, evtchn_port_t port, bool do_unmask)
}
}
+/*
+ * Mark @port pending in the 64-bit ("long mode") shared_info ABI and, when
+ * the port is newly pending and not masked, raise the upcall on the vCPU
+ * the port is bound to. Returns 0 on success — including the "already
+ * pending" and "masked" cases, which Xen treats as success — or -EINVAL
+ * for a port beyond the pending bitmap.
+ */
+static int do_set_port_lm(XenEvtchnState *s, evtchn_port_t port,
+ struct shared_info *shinfo,
+ struct vcpu_info *vcpu_info)
+{
+ const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
+ typeof(shinfo->evtchn_pending[0]) mask;
+ int idx = port / bits_per_word;
+ int offset = port % bits_per_word;
+
+ mask = 1UL << offset;
+
+ /*
+ * The Xen ABI declares evtchn_pending[] with bits_per_word entries, so
+ * the array length coincides with bits_per_word and this bounds-checks
+ * idx against the array. NOTE(review): an explicit ARRAY_SIZE() would
+ * make that invariant self-evident — confirm against the imported
+ * public headers.
+ */
+ if (idx >= bits_per_word) {
+ return -EINVAL;
+ }
+
+ /* Update the pending bit itself. If it was already set, we're done. */
+ if (qatomic_fetch_or(&shinfo->evtchn_pending[idx], mask) & mask) {
+ return 0;
+ }
+
+ /* Check if it's masked (fetch_or with 0 acts as an atomic read). */
+ if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) {
+ return 0;
+ }
+
+ /* Now on to the vcpu_info evtchn_pending_sel index... */
+ mask = 1UL << idx;
+
+ /* If a port in this word was already pending for this vCPU, all done. */
+ if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) {
+ return 0;
+ }
+
+ /* Set evtchn_upcall_pending for this vCPU */
+ if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) {
+ return 0;
+ }
+
+ /* Newly pending and not already signalled: deliver the callback vector. */
+ kvm_xen_inject_vcpu_callback_vector(s->port_table[port].vcpu);
+
+ return 0;
+}
+
+/*
+ * 32-bit ("compat") counterpart of do_set_port_lm(): identical logic but
+ * operating on the compat_shared_info / compat_vcpu_info layouts, whose
+ * bitmap words are 32 bits wide. Returns 0 on success (including the
+ * already-pending and masked cases) or -EINVAL for an out-of-range port.
+ */
+static int do_set_port_compat(XenEvtchnState *s, evtchn_port_t port,
+ struct compat_shared_info *shinfo,
+ struct compat_vcpu_info *vcpu_info)
+{
+ const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
+ typeof(shinfo->evtchn_pending[0]) mask;
+ int idx = port / bits_per_word;
+ int offset = port % bits_per_word;
+
+ mask = 1UL << offset;
+
+ /*
+ * As in the long-mode variant, the compat ABI sizes evtchn_pending[]
+ * at bits_per_word entries, so this is a bounds check on the array
+ * index. NOTE(review): ARRAY_SIZE() would make the invariant explicit.
+ */
+ if (idx >= bits_per_word) {
+ return -EINVAL;
+ }
+
+ /* Update the pending bit itself. If it was already set, we're done. */
+ if (qatomic_fetch_or(&shinfo->evtchn_pending[idx], mask) & mask) {
+ return 0;
+ }
+
+ /* Check if it's masked (fetch_or with 0 acts as an atomic read). */
+ if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) {
+ return 0;
+ }
+
+ /* Now on to the vcpu_info evtchn_pending_sel index... */
+ mask = 1UL << idx;
+
+ /* If a port in this word was already pending for this vCPU, all done. */
+ if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) {
+ return 0;
+ }
+
+ /* Set evtchn_upcall_pending for this vCPU */
+ if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) {
+ return 0;
+ }
+
+ /* Newly pending and not already signalled: deliver the callback vector. */
+ kvm_xen_inject_vcpu_callback_vector(s->port_table[port].vcpu);
+
+ return 0;
+}
+
+/*
+ * Make @port pending, dispatching to the ABI-specific helper according to
+ * whether the guest runs the 64-bit or compat shared_info layout.
+ * Returns 0 on success, -EINVAL for a closed port or missing vcpu_info,
+ * -ENOTSUP if the shared_info page has not been mapped by the guest.
+ */
+static int set_port_pending(XenEvtchnState *s, evtchn_port_t port)
+{
+ void *vcpu_info, *shinfo;
+
+ if (s->port_table[port].type == EVTCHNSTAT_closed) {
+ return -EINVAL;
+ }
+
+ /* No events can be raised until the guest maps the shared_info page. */
+ shinfo = xen_overlay_page_ptr(XENMAPSPACE_shared_info, 0);
+ if (!shinfo) {
+ return -ENOTSUP;
+ }
+
+ /* HVA of the vcpu_info for the vCPU this port is bound to. */
+ vcpu_info = kvm_xen_get_vcpu_info_hva(s->port_table[port].vcpu);
+ if (!vcpu_info) {
+ return -EINVAL;
+ }
+
+ if (xen_is_long_mode()) {
+ return do_set_port_lm(s, port, shinfo, vcpu_info);
+ } else {
+ return do_set_port_compat(s, port, shinfo, vcpu_info);
+ }
+}
+
static bool virq_is_global(uint32_t virq)
{
switch (virq) {
@@ -620,3 +731,54 @@ int xen_evtchn_bind_ipi_op(struct evtchn_bind_ipi *ipi)
return ret;
}
+
+/*
+ * Implement EVTCHNOP_send: raise an event on the given port.
+ *
+ * Returns 0 on success, -ENOTSUP when Xen emulation is not active,
+ * -EINVAL for an invalid port or port type, -ENOSYS for interdomain
+ * ports bound to qemu itself (not yet implemented), or any error
+ * propagated from set_port_pending().
+ */
+int xen_evtchn_send_op(struct evtchn_send *send)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ XenEvtchnPort *p;
+ int ret = 0;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ if (!valid_port(send->port)) {
+ return -EINVAL;
+ }
+
+ qemu_mutex_lock(&s->port_lock);
+
+ p = &s->port_table[send->port];
+
+ switch (p->type) {
+ case EVTCHNSTAT_interdomain:
+ if (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU) {
+ /*
+ * This is an event from the guest to qemu itself, which is
+ * serving as the driver domain. Not yet implemented; it will
+ * be hooked up to the qemu implementation of xenstore,
+ * console, PV net/block drivers etc.
+ */
+ ret = -ENOSYS;
+ } else {
+ /*
+ * Loopback interdomain ports; just a complex IPI. Propagate
+ * the result rather than silently discarding failures.
+ */
+ ret = set_port_pending(s, p->type_val);
+ }
+ break;
+
+ case EVTCHNSTAT_ipi:
+ ret = set_port_pending(s, send->port);
+ break;
+
+ case EVTCHNSTAT_unbound:
+ /* Xen will silently drop these */
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ qemu_mutex_unlock(&s->port_lock);
+
+ return ret;
+}
+
diff --git a/hw/i386/kvm/xen_evtchn.h b/hw/i386/kvm/xen_evtchn.h
index 52ade5a64e..c27b9e8096 100644
--- a/hw/i386/kvm/xen_evtchn.h
+++ b/hw/i386/kvm/xen_evtchn.h
@@ -18,8 +18,10 @@ struct evtchn_close;
struct evtchn_unmask;
struct evtchn_bind_virq;
struct evtchn_bind_ipi;
+struct evtchn_send;
int xen_evtchn_status_op(struct evtchn_status *status);
int xen_evtchn_close_op(struct evtchn_close *close);
int xen_evtchn_unmask_op(struct evtchn_unmask *unmask);
int xen_evtchn_bind_virq_op(struct evtchn_bind_virq *virq);
int xen_evtchn_bind_ipi_op(struct evtchn_bind_ipi *ipi);
+int xen_evtchn_send_op(struct evtchn_send *send);
diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c
index b5f8f30d62..300b0d75bc 100644
--- a/target/i386/kvm/xen-emu.c
+++ b/target/i386/kvm/xen-emu.c
@@ -800,6 +800,18 @@ static bool kvm_xen_hcall_evtchn_op(struct kvm_xen_exit *exit, X86CPU *cpu,
}
break;
}
+ case EVTCHNOP_send: {
+ struct evtchn_send send;
+
+ qemu_build_assert(sizeof(send) == 4);
+ if (kvm_copy_from_gva(cs, arg, &send, sizeof(send))) {
+ err = -EFAULT;
+ break;
+ }
+
+ err = xen_evtchn_send_op(&send);
+ break;
+ }
default:
return false;
}
--
2.35.3
next prev parent reply other threads:[~2022-12-16 0:52 UTC|newest]
Thread overview: 50+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-12-16 0:40 [RFC PATCH v3 00/38] Xen HVM support under KVM David Woodhouse
2022-12-16 0:40 ` [RFC PATCH v3 01/38] include: import xen public headers David Woodhouse
2022-12-20 14:14 ` Paul Durrant
2022-12-16 0:40 ` [RFC PATCH v3 02/38] xen: add CONFIG_XENFV_MACHINE and CONFIG_XEN_EMU options for Xen emulation David Woodhouse
2022-12-20 14:33 ` Paul Durrant
2022-12-16 0:40 ` [RFC PATCH v3 03/38] xen: Add XEN_DISABLED mode and make it default David Woodhouse
2022-12-20 14:39 ` Paul Durrant
2022-12-20 22:59 ` David Woodhouse
2022-12-21 9:49 ` Paul Durrant
2022-12-16 0:40 ` [RFC PATCH v3 04/38] i386/kvm: Add xen-version machine property and init KVM Xen support David Woodhouse
2022-12-20 14:47 ` Paul Durrant
2022-12-16 0:40 ` [RFC PATCH v3 05/38] i386/kvm: handle Xen HVM cpuid leaves David Woodhouse
2022-12-20 14:58 ` Paul Durrant
2022-12-16 0:40 ` [RFC PATCH v3 06/38] xen-platform: exclude vfio-pci from the PCI platform unplug David Woodhouse
2022-12-20 16:18 ` Paul Durrant
2022-12-16 0:40 ` [RFC PATCH v3 07/38] xen-platform: allow its creation with XEN_EMULATE mode David Woodhouse
2022-12-20 16:19 ` Paul Durrant
2022-12-20 16:29 ` David Woodhouse
2022-12-16 0:40 ` [RFC PATCH v3 08/38] hw/xen_backend: refactor xen_be_init() David Woodhouse
2022-12-20 16:22 ` Paul Durrant
2022-12-16 0:40 ` [RFC PATCH v3 09/38] i386/xen: handle guest hypercalls David Woodhouse
2022-12-16 0:40 ` [RFC PATCH v3 10/38] i386/xen: implement HYPERCALL_xen_version David Woodhouse
2022-12-16 0:40 ` [RFC PATCH v3 11/38] hw/xen: Add xen_overlay device for emulating shared xenheap pages David Woodhouse
2022-12-16 0:40 ` [RFC PATCH v3 12/38] i386/xen: add pc_machine_kvm_type to initialize XEN_EMULATE mode David Woodhouse
2022-12-16 0:40 ` [RFC PATCH v3 13/38] i386/xen: manage and save/restore Xen guest long_mode setting David Woodhouse
2022-12-16 0:40 ` [RFC PATCH v3 14/38] i386/xen: implement HYPERVISOR_memory_op David Woodhouse
2022-12-16 0:40 ` [RFC PATCH v3 15/38] i386/xen: implement XENMEM_add_to_physmap_batch David Woodhouse
2022-12-16 0:40 ` [RFC PATCH v3 16/38] i386/xen: implement HYPERVISOR_hvm_op David Woodhouse
2022-12-16 0:40 ` [RFC PATCH v3 17/38] i386/xen: implement HYPERVISOR_vcpu_op David Woodhouse
2022-12-16 0:40 ` [RFC PATCH v3 18/38] i386/xen: handle VCPUOP_register_vcpu_info David Woodhouse
2022-12-16 0:40 ` [RFC PATCH v3 19/38] i386/xen: handle VCPUOP_register_vcpu_time_info David Woodhouse
2022-12-16 0:40 ` [RFC PATCH v3 20/38] i386/xen: handle VCPUOP_register_runstate_memory_area David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 21/38] i386/xen: implement HVMOP_set_evtchn_upcall_vector David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 22/38] i386/xen: HVMOP_set_param / HVM_PARAM_CALLBACK_IRQ David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 23/38] i386/xen: implement HYPERVISOR_event_channel_op David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 24/38] i386/xen: implement HYPERVISOR_sched_op David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 25/38] hw/xen: Implement EVTCHNOP_status David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 26/38] hw/xen: Implement EVTCHNOP_close David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 27/38] hw/xen: Implement EVTCHNOP_unmask David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 28/38] hw/xen: Implement EVTCHNOP_bind_virq David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 29/38] hw/xen: Implement EVTCHNOP_bind_ipi David Woodhouse
2022-12-16 0:41 ` David Woodhouse [this message]
2022-12-16 0:41 ` [RFC PATCH v3 31/38] hw/xen: Implement EVTCHNOP_alloc_unbound David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 32/38] hw/xen: Implement EVTCHNOP_bind_interdomain David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 33/38] hw/xen: Implement EVTCHNOP_bind_vcpu David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 34/38] hw/xen: Implement EVTCHNOP_reset David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 35/38] i386/xen: add monitor commands to test event injection David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 36/38] i386/xen: Implement SCHEDOP_poll David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 37/38] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_GSI callback David Woodhouse
2022-12-16 0:41 ` [RFC PATCH v3 38/38] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_PCI_INTX callback David Woodhouse
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20221216004117.862106-31-dwmw2@infradead.org \
--to=dwmw2@infradead.org \
--cc=alex.bennee@linaro.org \
--cc=ankur.a.arora@oracle.com \
--cc=cfontana@suse.de \
--cc=dgilbert@redhat.com \
--cc=joao.m.martins@oracle.com \
--cc=paul@xen.org \
--cc=pbonzini@redhat.com \
--cc=philmd@linaro.org \
--cc=qemu-devel@nongnu.org \
--cc=quintela@redhat.com \
--cc=thuth@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).