qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: David Woodhouse <dwmw2@infradead.org>
To: qemu-devel@nongnu.org
Cc: "Paolo Bonzini" <pbonzini@redhat.com>,
	"Paul Durrant" <paul@xen.org>,
	"Joao Martins" <joao.m.martins@oracle.com>,
	"Ankur Arora" <ankur.a.arora@oracle.com>,
	"Philippe Mathieu-Daudé" <philmd@linaro.org>,
	"Thomas Huth" <thuth@redhat.com>,
	"Alex Bennée" <alex.bennee@linaro.org>,
	"Juan Quintela" <quintela@redhat.com>,
	"Dr . David Alan Gilbert" <dgilbert@redhat.com>,
	"Claudio Fontana" <cfontana@suse.de>
Subject: [RFC PATCH v3 27/38] hw/xen: Implement EVTCHNOP_unmask
Date: Fri, 16 Dec 2022 00:41:06 +0000	[thread overview]
Message-ID: <20221216004117.862106-28-dwmw2@infradead.org> (raw)
In-Reply-To: <20221216004117.862106-1-dwmw2@infradead.org>

From: David Woodhouse <dwmw@amazon.co.uk>

This finally comes with a mechanism for actually injecting events into
the guest vCPU, with all the atomic-test-and-set that's involved in
setting the bit in the shinfo, then the index in the vcpu_info, and
injecting either the lapic vector as MSI, or letting KVM inject the
bare vector.

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
---
 hw/i386/kvm/xen_evtchn.c  | 198 ++++++++++++++++++++++++++++++++++++++
 hw/i386/kvm/xen_evtchn.h  |   2 +
 include/sysemu/kvm_xen.h  |  18 ++++
 target/i386/kvm/xen-emu.c |  72 ++++++++++++++
 4 files changed, 290 insertions(+)
 create mode 100644 include/sysemu/kvm_xen.h

diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c
index d4008e7ee1..50adef0864 100644
--- a/hw/i386/kvm/xen_evtchn.c
+++ b/hw/i386/kvm/xen_evtchn.c
@@ -21,10 +21,13 @@
 
 #include "hw/sysbus.h"
 #include "hw/xen/xen.h"
+
 #include "xen_evtchn.h"
 #include "xen_overlay.h"
 
 #include "sysemu/kvm.h"
+#include "sysemu/kvm_xen.h"
+
 #include <linux/kvm.h>
 
 #include "standard-headers/xen/memory.h"
@@ -39,6 +42,41 @@ typedef struct XenEvtchnPort {
     uint16_t type_val;  /* pirq# / virq# / remote port according to type */
 } XenEvtchnPort;
 
+/* 32-bit compatibility definitions, also used natively in 32-bit build */
+/*
+ * These mirror the guest-visible Xen ABI structures for 32-bit guests.
+ * They are overlaid directly on guest memory, so the field layout must
+ * match the Xen public headers exactly — no members may be added,
+ * removed or reordered, and no padding may change.
+ */
+struct compat_arch_vcpu_info {
+    unsigned int cr2;
+    unsigned int pad[5];
+};
+
+struct compat_vcpu_info {
+    uint8_t evtchn_upcall_pending;
+    uint8_t evtchn_upcall_mask;
+    uint16_t pad;
+    uint32_t evtchn_pending_sel;
+    struct compat_arch_vcpu_info arch;
+    struct vcpu_time_info time;
+}; /* 64 bytes (x86) */
+
+struct compat_arch_shared_info {
+    unsigned int max_pfn;
+    unsigned int pfn_to_mfn_frame_list_list;
+    unsigned int nmi_reason;
+    unsigned int p2m_cr3;
+    unsigned int p2m_vaddr;
+    unsigned int p2m_generation;
+    uint32_t wc_sec_hi;
+};
+
+struct compat_shared_info {
+    struct compat_vcpu_info vcpu_info[XEN_LEGACY_MAX_VCPUS];
+    uint32_t evtchn_pending[32];
+    uint32_t evtchn_mask[32];
+    uint32_t wc_version;      /* Version counter: see vcpu_time_info_t. */
+    uint32_t wc_sec;
+    uint32_t wc_nsec;
+    struct compat_arch_shared_info arch;
+};
+
 #define COMPAT_EVTCHN_2L_NR_CHANNELS            1024
 
 /*
@@ -222,6 +260,144 @@ int xen_evtchn_status_op(struct evtchn_status *status)
     return 0;
 }
 
+/*
+ * Never thought I'd hear myself say this, but C++ templates would be
+ * kind of nice here.
+ *
+ * template<class T> static int do_unmask_port(T *shinfo, ...);
+ */
+/*
+ * Unmask @port for a 64-bit (long mode) guest, operating directly on the
+ * guest-mapped shared_info and vcpu_info pages. If the port was pending
+ * while masked, propagate it up the 2-level hierarchy (per-word pending
+ * bit -> vcpu evtchn_pending_sel -> evtchn_upcall_pending) and inject the
+ * callback vector into the target vCPU.
+ *
+ * @do_unmask: true for a real EVTCHNOP_unmask (clears the mask bit);
+ *             false for a pseudo-unmask on affinity change, which only
+ *             re-delivers an already-unmasked pending event.
+ *
+ * Called with s->port_lock held (see xen_evtchn_unmask_op).
+ * Returns 0 on success, -EINVAL if the port is out of range.
+ */
+static int do_unmask_port_lm(XenEvtchnState *s, evtchn_port_t port,
+                             bool do_unmask, struct shared_info *shinfo,
+                             struct vcpu_info *vcpu_info)
+{
+    const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
+    typeof(shinfo->evtchn_pending[0]) mask;
+    int idx = port / bits_per_word;
+    int offset = port % bits_per_word;
+
+    mask = 1UL << offset;
+
+    /* NOTE(review): this bounds the word index by bits_per_word, which is
+     * only the correct array bound because the 2-level ABI declares the
+     * pending/mask arrays with exactly (sizeof(word) * 8) entries — TODO
+     * confirm against the Xen public headers. */
+    if (idx >= bits_per_word) {
+        return -EINVAL;
+    }
+
+    if (do_unmask) {
+        /* If this is a true unmask operation, clear the mask bit. If
+         * it was already unmasked, we have nothing further to do. */
+        if (!((qatomic_fetch_and(&shinfo->evtchn_mask[idx], ~mask) & mask))) {
+            return 0;
+        }
+    } else {
+        /* This is a pseudo-unmask for affinity changes. We don't
+         * change the mask bit, and if it's *masked* we have nothing
+         * else to do. */
+        if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) {
+            return 0;
+        }
+    }
+
+    /* If the event was not pending, we're done. */
+    if (!(qatomic_fetch_or(&shinfo->evtchn_pending[idx], 0) & mask)) {
+        return 0;
+    }
+
+    /* Now on to the vcpu_info evtchn_pending_sel index... */
+    mask = 1UL << idx;
+
+    /* If a port in this word was already pending for this vCPU, all done. */
+    if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) {
+        return 0;
+    }
+
+    /* Set evtchn_upcall_pending for this vCPU */
+    if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) {
+        return 0;
+    }
+
+    /* Upcall was newly raised; deliver the callback vector to the vCPU
+     * that this port is bound to. */
+    kvm_xen_inject_vcpu_callback_vector(s->port_table[port].vcpu);
+
+    return 0;
+}
+
+/*
+ * 32-bit guest variant of do_unmask_port_lm(), identical in logic but
+ * operating on the compat_shared_info / compat_vcpu_info layouts whose
+ * pending/mask words are 32 bits wide. Kept as a separate function
+ * because the word width changes the idx/offset arithmetic (see the
+ * "C++ templates" lament above do_unmask_port_lm).
+ *
+ * Called with s->port_lock held (see xen_evtchn_unmask_op).
+ * Returns 0 on success, -EINVAL if the port is out of range.
+ */
+static int do_unmask_port_compat(XenEvtchnState *s, evtchn_port_t port,
+                                 bool do_unmask,
+                                 struct compat_shared_info *shinfo,
+                                 struct compat_vcpu_info *vcpu_info)
+{
+    const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
+    typeof(shinfo->evtchn_pending[0]) mask;
+    int idx = port / bits_per_word;
+    int offset = port % bits_per_word;
+
+    mask = 1UL << offset;
+
+    /* NOTE(review): as in the lm variant, this is the correct array bound
+     * only because evtchn_pending[] has 32 entries of 32 bits each. */
+    if (idx >= bits_per_word) {
+        return -EINVAL;
+    }
+
+    if (do_unmask) {
+        /* If this is a true unmask operation, clear the mask bit. If
+         * it was already unmasked, we have nothing further to do. */
+        if (!((qatomic_fetch_and(&shinfo->evtchn_mask[idx], ~mask) & mask))) {
+            return 0;
+        }
+    } else {
+        /* This is a pseudo-unmask for affinity changes. We don't
+         * change the mask bit, and if it's *masked* we have nothing
+         * else to do. */
+        if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) {
+            return 0;
+        }
+    }
+
+    /* If the event was not pending, we're done. */
+    if (!(qatomic_fetch_or(&shinfo->evtchn_pending[idx], 0) & mask)) {
+        return 0;
+    }
+
+    /* Now on to the vcpu_info evtchn_pending_sel index... */
+    mask = 1UL << idx;
+
+    /* If a port in this word was already pending for this vCPU, all done. */
+    if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) {
+        return 0;
+    }
+
+    /* Set evtchn_upcall_pending for this vCPU */
+    if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) {
+        return 0;
+    }
+
+    /* Upcall was newly raised; deliver the callback vector to the vCPU
+     * that this port is bound to. */
+    kvm_xen_inject_vcpu_callback_vector(s->port_table[port].vcpu);
+
+    return 0;
+}
+
+/*
+ * Common entry for (pseudo-)unmasking a port: validate the port, locate
+ * the guest-mapped shared_info page and the target vCPU's vcpu_info,
+ * then dispatch to the long-mode or 32-bit compat implementation.
+ *
+ * Called with s->port_lock held. Returns 0 on success, -EINVAL for a
+ * closed port or missing vcpu_info, -ENOTSUP if the guest has not yet
+ * mapped a shared_info page.
+ */
+static int unmask_port(XenEvtchnState *s, evtchn_port_t port, bool do_unmask)
+{
+    void *vcpu_info, *shinfo;
+
+    if (s->port_table[port].type == EVTCHNSTAT_closed) {
+        return -EINVAL;
+    }
+
+    /* HVA of the shared_info overlay page the guest mapped earlier. */
+    shinfo = xen_overlay_page_ptr(XENMAPSPACE_shared_info, 0);
+    if (!shinfo) {
+        return -ENOTSUP;
+    }
+
+    /* HVA of the vcpu_info for the vCPU this port is bound to. */
+    vcpu_info = kvm_xen_get_vcpu_info_hva(s->port_table[port].vcpu);
+    if (!vcpu_info) {
+        return -EINVAL;
+    }
+
+    /* The shared_info/vcpu_info layout differs between 64-bit and
+     * 32-bit guests, hence the two typed implementations. */
+    if (xen_is_long_mode()) {
+        return do_unmask_port_lm(s, port, do_unmask, shinfo, vcpu_info);
+    } else {
+        return do_unmask_port_compat(s, port, do_unmask, shinfo, vcpu_info);
+    }
+}
+
 static void free_port(XenEvtchnState *s, evtchn_port_t port)
 {
     s->port_table[port].type = EVTCHNSTAT_closed;
@@ -272,3 +448,25 @@ int xen_evtchn_close_op(struct evtchn_close *close)
 
     return ret;
 }
+
+/*
+ * Handle the guest's EVTCHNOP_unmask hypercall: a true unmask of
+ * unmask->port (do_unmask == true), re-delivering any event that was
+ * pending while the port was masked.
+ *
+ * Returns 0 on success, -ENOTSUP if the event channel device does not
+ * exist, -EINVAL for an invalid port, or the error from unmask_port().
+ */
+int xen_evtchn_unmask_op(struct evtchn_unmask *unmask)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    int ret;
+
+    if (!s) {
+        return -ENOTSUP;
+    }
+
+    if (!valid_port(unmask->port)) {
+        return -EINVAL;
+    }
+
+    /* port_lock protects port_table against concurrent bind/close. */
+    qemu_mutex_lock(&s->port_lock);
+
+    ret = unmask_port(s, unmask->port, true);
+
+    qemu_mutex_unlock(&s->port_lock);
+
+    return ret;
+}
diff --git a/hw/i386/kvm/xen_evtchn.h b/hw/i386/kvm/xen_evtchn.h
index 4c00000315..2fb7d70043 100644
--- a/hw/i386/kvm/xen_evtchn.h
+++ b/hw/i386/kvm/xen_evtchn.h
@@ -15,5 +15,7 @@ int xen_evtchn_set_callback_param(uint64_t param);
 
 struct evtchn_status;
 struct evtchn_close;
+struct evtchn_unmask;
 int xen_evtchn_status_op(struct evtchn_status *status);
 int xen_evtchn_close_op(struct evtchn_close *close);
+int xen_evtchn_unmask_op(struct evtchn_unmask *unmask);
diff --git a/include/sysemu/kvm_xen.h b/include/sysemu/kvm_xen.h
new file mode 100644
index 0000000000..ab629feb13
--- /dev/null
+++ b/include/sysemu/kvm_xen.h
@@ -0,0 +1,18 @@
+/*
+ * Xen HVM emulation support in KVM
+ *
+ * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
+ * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_SYSEMU_KVM_XEN_H
+#define QEMU_SYSEMU_KVM_XEN_H
+
+/* Return the host virtual address of the given vCPU's Xen vcpu_info
+ * structure, or NULL if none has been registered or defaulted. */
+void *kvm_xen_get_vcpu_info_hva(uint32_t vcpu_id);
+
+/* Deliver the Xen event channel upcall to the given vCPU, either as a
+ * lapic MSI for a per-vCPU callback vector, or by kicking the vCPU so
+ * KVM delivers the vector itself. */
+void kvm_xen_inject_vcpu_callback_vector(uint32_t vcpu_id);
+
+#endif /* QEMU_SYSEMU_KVM_XEN_H */
diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c
index f57d99f9d6..51cb6bf052 100644
--- a/target/i386/kvm/xen-emu.c
+++ b/target/i386/kvm/xen-emu.c
@@ -15,10 +15,13 @@
 #include "qemu/log.h"
 #include "hw/xen/xen.h"
 #include "sysemu/kvm_int.h"
+#include "sysemu/kvm_xen.h"
 #include "kvm/kvm_i386.h"
 #include "exec/address-spaces.h"
 #include "xen-emu.h"
 #include "trace.h"
+#include "hw/pci/msi.h"
+#include "hw/i386/apic-msidef.h"
 #include "hw/i386/kvm/xen_overlay.h"
 #include "hw/i386/kvm/xen_evtchn.h"
 #include "sysemu/runstate.h"
@@ -227,6 +230,63 @@ static void do_set_vcpu_info_gpa(CPUState *cs, run_on_cpu_data data)
                           env->xen_vcpu_info_gpa);
 }
 
+
+/*
+ * Translate a guest physical address to a host virtual address via the
+ * system memory map, or NULL if the GPA is not backed by RAM.
+ *
+ * NOTE(review): memory_region_find() takes a reference on the returned
+ * MemoryRegion which is never dropped here — looks like a refcount leak;
+ * confirm whether a memory_region_unref() is needed.
+ */
+static void *gpa_to_hva(uint64_t gpa)
+{
+    MemoryRegionSection mrs;
+
+    mrs = memory_region_find(get_system_memory(), gpa, 1);
+    return !mrs.mr ? NULL : qemu_map_ram_ptr(mrs.mr->ram_block,
+                                             mrs.offset_within_region);
+}
+
+/*
+ * Return the host virtual address of the vcpu_info structure for
+ * @vcpu_id, preferring an explicitly registered GPA over the default
+ * location. Returns NULL if the vCPU does not exist or no vcpu_info
+ * has been set up (both GPA fields still UINT64_MAX).
+ */
+void *kvm_xen_get_vcpu_info_hva(uint32_t vcpu_id)
+{
+    CPUState *cs = qemu_get_cpu(vcpu_id);
+    CPUX86State *env;
+    uint64_t gpa;
+
+    if (!cs) {
+        return NULL;
+    }
+    env = &X86_CPU(cs)->env;
+
+    /* An explicit VCPUOP_register_vcpu_info GPA takes precedence; fall
+     * back to the default location. UINT64_MAX means "unset". */
+    gpa = env->xen_vcpu_info_gpa;
+    if (gpa == UINT64_MAX) {
+        gpa = env->xen_vcpu_info_default_gpa;
+    }
+    if (gpa == UINT64_MAX) {
+        return NULL;
+    }
+
+    return gpa_to_hva(gpa);
+}
+
+/*
+ * Deliver the Xen event channel upcall to @vcpu_id. If the guest
+ * registered a per-vCPU callback vector (HVMOP_set_evtchn_upcall_vector),
+ * inject it through the lapic as a level-triggered MSI; otherwise just
+ * kick the vCPU and rely on KVM to deliver the vector on entry based on
+ * evtchn_upcall_pending. A nonexistent vCPU is silently ignored.
+ */
+void kvm_xen_inject_vcpu_callback_vector(uint32_t vcpu_id)
+{
+    CPUState *cs = qemu_get_cpu(vcpu_id);
+    uint8_t vector;
+
+    if (!cs) {
+        return;
+    }
+    vector = X86_CPU(cs)->env.xen_vcpu_callback_vector;
+
+    if (vector) {
+        /* The per-vCPU callback vector injected via lapic. Just
+         * deliver it as an MSI. */
+        MSIMessage msg = {
+            .address = APIC_DEFAULT_ADDRESS | X86_CPU(cs)->apic_id,
+            .data = vector | (1UL << MSI_DATA_LEVEL_SHIFT),
+        };
+        kvm_irqchip_send_msi(kvm_state, msg);
+        return;
+    }
+
+    /* If the evtchn_upcall_pending field in the vcpu_info is set, then
+     * KVM will automatically deliver the vector on entering the vCPU
+     * so all we have to do is kick it out. */
+    qemu_cpu_kick(cs);
+}
+
 static void do_set_vcpu_time_info_gpa(CPUState *cs, run_on_cpu_data data)
 {
     X86CPU *cpu = X86_CPU(cs);
@@ -652,6 +712,18 @@ static bool kvm_xen_hcall_evtchn_op(struct kvm_xen_exit *exit, X86CPU *cpu,
         err = xen_evtchn_close_op(&close);
         break;
     }
+    case EVTCHNOP_unmask: {
+        struct evtchn_unmask unmask;
+
+        qemu_build_assert(sizeof(unmask) == 4);
+        if (kvm_copy_from_gva(cs, arg, &unmask, sizeof(unmask))) {
+            err = -EFAULT;
+            break;
+        }
+
+        err = xen_evtchn_unmask_op(&unmask);
+        break;
+    }
     default:
         return false;
     }
-- 
2.35.3



  parent reply	other threads:[~2022-12-16  0:48 UTC|newest]

Thread overview: 50+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-12-16  0:40 [RFC PATCH v3 00/38] Xen HVM support under KVM David Woodhouse
2022-12-16  0:40 ` [RFC PATCH v3 01/38] include: import xen public headers David Woodhouse
2022-12-20 14:14   ` Paul Durrant
2022-12-16  0:40 ` [RFC PATCH v3 02/38] xen: add CONFIG_XENFV_MACHINE and CONFIG_XEN_EMU options for Xen emulation David Woodhouse
2022-12-20 14:33   ` Paul Durrant
2022-12-16  0:40 ` [RFC PATCH v3 03/38] xen: Add XEN_DISABLED mode and make it default David Woodhouse
2022-12-20 14:39   ` Paul Durrant
2022-12-20 22:59     ` David Woodhouse
2022-12-21  9:49       ` Paul Durrant
2022-12-16  0:40 ` [RFC PATCH v3 04/38] i386/kvm: Add xen-version machine property and init KVM Xen support David Woodhouse
2022-12-20 14:47   ` Paul Durrant
2022-12-16  0:40 ` [RFC PATCH v3 05/38] i386/kvm: handle Xen HVM cpuid leaves David Woodhouse
2022-12-20 14:58   ` Paul Durrant
2022-12-16  0:40 ` [RFC PATCH v3 06/38] xen-platform: exclude vfio-pci from the PCI platform unplug David Woodhouse
2022-12-20 16:18   ` Paul Durrant
2022-12-16  0:40 ` [RFC PATCH v3 07/38] xen-platform: allow its creation with XEN_EMULATE mode David Woodhouse
2022-12-20 16:19   ` Paul Durrant
2022-12-20 16:29     ` David Woodhouse
2022-12-16  0:40 ` [RFC PATCH v3 08/38] hw/xen_backend: refactor xen_be_init() David Woodhouse
2022-12-20 16:22   ` Paul Durrant
2022-12-16  0:40 ` [RFC PATCH v3 09/38] i386/xen: handle guest hypercalls David Woodhouse
2022-12-16  0:40 ` [RFC PATCH v3 10/38] i386/xen: implement HYPERCALL_xen_version David Woodhouse
2022-12-16  0:40 ` [RFC PATCH v3 11/38] hw/xen: Add xen_overlay device for emulating shared xenheap pages David Woodhouse
2022-12-16  0:40 ` [RFC PATCH v3 12/38] i386/xen: add pc_machine_kvm_type to initialize XEN_EMULATE mode David Woodhouse
2022-12-16  0:40 ` [RFC PATCH v3 13/38] i386/xen: manage and save/restore Xen guest long_mode setting David Woodhouse
2022-12-16  0:40 ` [RFC PATCH v3 14/38] i386/xen: implement HYPERVISOR_memory_op David Woodhouse
2022-12-16  0:40 ` [RFC PATCH v3 15/38] i386/xen: implement XENMEM_add_to_physmap_batch David Woodhouse
2022-12-16  0:40 ` [RFC PATCH v3 16/38] i386/xen: implement HYPERVISOR_hvm_op David Woodhouse
2022-12-16  0:40 ` [RFC PATCH v3 17/38] i386/xen: implement HYPERVISOR_vcpu_op David Woodhouse
2022-12-16  0:40 ` [RFC PATCH v3 18/38] i386/xen: handle VCPUOP_register_vcpu_info David Woodhouse
2022-12-16  0:40 ` [RFC PATCH v3 19/38] i386/xen: handle VCPUOP_register_vcpu_time_info David Woodhouse
2022-12-16  0:40 ` [RFC PATCH v3 20/38] i386/xen: handle VCPUOP_register_runstate_memory_area David Woodhouse
2022-12-16  0:41 ` [RFC PATCH v3 21/38] i386/xen: implement HVMOP_set_evtchn_upcall_vector David Woodhouse
2022-12-16  0:41 ` [RFC PATCH v3 22/38] i386/xen: HVMOP_set_param / HVM_PARAM_CALLBACK_IRQ David Woodhouse
2022-12-16  0:41 ` [RFC PATCH v3 23/38] i386/xen: implement HYPERVISOR_event_channel_op David Woodhouse
2022-12-16  0:41 ` [RFC PATCH v3 24/38] i386/xen: implement HYPERVISOR_sched_op David Woodhouse
2022-12-16  0:41 ` [RFC PATCH v3 25/38] hw/xen: Implement EVTCHNOP_status David Woodhouse
2022-12-16  0:41 ` [RFC PATCH v3 26/38] hw/xen: Implement EVTCHNOP_close David Woodhouse
2022-12-16  0:41 ` David Woodhouse [this message]
2022-12-16  0:41 ` [RFC PATCH v3 28/38] hw/xen: Implement EVTCHNOP_bind_virq David Woodhouse
2022-12-16  0:41 ` [RFC PATCH v3 29/38] hw/xen: Implement EVTCHNOP_bind_ipi David Woodhouse
2022-12-16  0:41 ` [RFC PATCH v3 30/38] hw/xen: Implement EVTCHNOP_send David Woodhouse
2022-12-16  0:41 ` [RFC PATCH v3 31/38] hw/xen: Implement EVTCHNOP_alloc_unbound David Woodhouse
2022-12-16  0:41 ` [RFC PATCH v3 32/38] hw/xen: Implement EVTCHNOP_bind_interdomain David Woodhouse
2022-12-16  0:41 ` [RFC PATCH v3 33/38] hw/xen: Implement EVTCHNOP_bind_vcpu David Woodhouse
2022-12-16  0:41 ` [RFC PATCH v3 34/38] hw/xen: Implement EVTCHNOP_reset David Woodhouse
2022-12-16  0:41 ` [RFC PATCH v3 35/38] i386/xen: add monitor commands to test event injection David Woodhouse
2022-12-16  0:41 ` [RFC PATCH v3 36/38] i386/xen: Implement SCHEDOP_poll David Woodhouse
2022-12-16  0:41 ` [RFC PATCH v3 37/38] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_GSI callback David Woodhouse
2022-12-16  0:41 ` [RFC PATCH v3 38/38] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_PCI_INTX callback David Woodhouse

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20221216004117.862106-28-dwmw2@infradead.org \
    --to=dwmw2@infradead.org \
    --cc=alex.bennee@linaro.org \
    --cc=ankur.a.arora@oracle.com \
    --cc=cfontana@suse.de \
    --cc=dgilbert@redhat.com \
    --cc=joao.m.martins@oracle.com \
    --cc=paul@xen.org \
    --cc=pbonzini@redhat.com \
    --cc=philmd@linaro.org \
    --cc=qemu-devel@nongnu.org \
    --cc=quintela@redhat.com \
    --cc=thuth@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).