From: David Woodhouse <dwmw2@infradead.org>
To: Peter Maydell <peter.maydell@linaro.org>, qemu-devel@nongnu.org
Cc: "Paolo Bonzini" <pbonzini@redhat.com>,
	"Paul Durrant" <paul@xen.org>,
	"Joao Martins" <joao.m.martins@oracle.com>,
	"Ankur Arora" <ankur.a.arora@oracle.com>,
	"Philippe Mathieu-Daudé" <philmd@linaro.org>,
	"Thomas Huth" <thuth@redhat.com>,
	"Alex Bennée" <alex.bennee@linaro.org>,
	"Juan Quintela" <quintela@redhat.com>,
	"Dr . David Alan Gilbert" <dgilbert@redhat.com>,
	"Claudio Fontana" <cfontana@suse.de>,
	"Julien Grall" <julien@xen.org>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	"Marcel Apfelbaum" <marcel.apfelbaum@gmail.com>,
	armbru@redhat.com
Subject: [PATCH v8 27/58] i386/xen: Add support for Xen event channel delivery to vCPU
Date: Fri, 20 Jan 2023 13:13:12 +0000
Message-ID: <20230120131343.1441939-28-dwmw2@infradead.org>
In-Reply-To: <20230120131343.1441939-1-dwmw2@infradead.org>

From: David Woodhouse <dwmw@amazon.co.uk>

The kvm_xen_inject_vcpu_callback_vector() function will either deliver
the per-vCPU local APIC vector (as an MSI), or just kick the vCPU out
of the kernel to trigger KVM's automatic delivery of the global vector.
Support for asserting the GSI/PCI_INTX callbacks will come later.

Also add kvm_xen_get_vcpu_info_hva(), which returns the host virtual
address (HVA) of the vcpu_info structure of a given vCPU.
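
As an illustration only (this sketch is not part of the patch), a caller
such as the event channel code added later in this series might use the
two new functions together roughly as below. The helper name, the include
path comment and the way the pending bit is set are assumptions made for
the example, not the actual evtchn implementation:

    #include "sysemu/kvm_xen.h"
    /*
     * struct vcpu_info and HVM_PARAM_CALLBACK_TYPE_* come from the Xen
     * public headers imported earlier in this series; the exact include
     * path for them is an assumption here.
     */

    static void example_notify_vcpu(uint32_t vcpu_id)
    {
        /* HVA of the target vCPU's vcpu_info, or NULL if not registered */
        struct vcpu_info *vi = kvm_xen_get_vcpu_info_hva(vcpu_id);

        if (!vi) {
            return;
        }

        /* Pretend an event channel was raised: mark the upcall pending... */
        vi->evtchn_upcall_pending = 1;

        /*
         * ...then deliver the callback. If a per-vCPU vector is configured
         * it is sent as an MSI; otherwise, for the global VECTOR callback
         * type, the vCPU is just kicked so that KVM injects the vector on
         * the next vmentry.
         */
        kvm_xen_inject_vcpu_callback_vector(vcpu_id,
                                            HVM_PARAM_CALLBACK_TYPE_VECTOR);
    }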

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Paul Durrant <paul@xen.org>
---
 include/sysemu/kvm_xen.h  |  2 +
 target/i386/cpu.h         |  2 +
 target/i386/kvm/xen-emu.c | 91 ++++++++++++++++++++++++++++++++++++---
 3 files changed, 89 insertions(+), 6 deletions(-)

diff --git a/include/sysemu/kvm_xen.h b/include/sysemu/kvm_xen.h
index d227a7982b..a7260f5d72 100644
--- a/include/sysemu/kvm_xen.h
+++ b/include/sysemu/kvm_xen.h
@@ -18,6 +18,8 @@
 
 int kvm_xen_soft_reset(void);
 uint32_t kvm_xen_get_caps(void);
+void *kvm_xen_get_vcpu_info_hva(uint32_t vcpu_id);
+void kvm_xen_inject_vcpu_callback_vector(uint32_t vcpu_id, int type);
 
 #define kvm_xen_has_cap(cap) (!!(kvm_xen_get_caps() &           \
                                  KVM_XEN_HVM_CONFIG_ ## cap))
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 938a1b9c8b..c9b12e7476 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1788,6 +1788,8 @@ typedef struct CPUArchState {
 #endif
 #if defined(CONFIG_KVM)
     struct kvm_nested_state *nested_state;
+    MemoryRegion *xen_vcpu_info_mr;
+    void *xen_vcpu_info_hva;
     uint64_t xen_vcpu_info_gpa;
     uint64_t xen_vcpu_info_default_gpa;
     uint64_t xen_vcpu_time_info_gpa;
diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c
index 25a0b6f1b0..35b71eb304 100644
--- a/target/i386/kvm/xen-emu.c
+++ b/target/i386/kvm/xen-emu.c
@@ -21,6 +21,8 @@
 #include "trace.h"
 #include "sysemu/runstate.h"
 
+#include "hw/pci/msi.h"
+#include "hw/i386/apic-msidef.h"
 #include "hw/i386/kvm/xen_overlay.h"
 #include "hw/i386/kvm/xen_evtchn.h"
 
@@ -248,6 +250,40 @@ static void do_set_vcpu_callback_vector(CPUState *cs, run_on_cpu_data data)
     }
 }
 
+static int set_vcpu_info(CPUState *cs, uint64_t gpa)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+    MemoryRegionSection mrs = { .mr = NULL };
+    void *vcpu_info_hva = NULL;
+    int ret;
+
+    ret = kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, gpa);
+    if (ret || gpa == INVALID_GPA) {
+        goto out;
+    }
+
+    mrs = memory_region_find(get_system_memory(), gpa,
+                             sizeof(struct vcpu_info));
+    if (!mrs.mr) {
+        ret = -EINVAL;
+    } else if (!mrs.mr->ram_block || mrs.size < sizeof(struct vcpu_info) ||
+               !(vcpu_info_hva = qemu_map_ram_ptr(mrs.mr->ram_block,
+                                                  mrs.offset_within_region))) {
+        ret = -EINVAL;
+        memory_region_unref(mrs.mr);
+        mrs.mr = NULL;
+    }
+
+ out:
+    if (env->xen_vcpu_info_mr) {
+        memory_region_unref(env->xen_vcpu_info_mr);
+    }
+    env->xen_vcpu_info_hva = vcpu_info_hva;
+    env->xen_vcpu_info_mr = mrs.mr;
+    return ret;
+}
+
 static void do_set_vcpu_info_default_gpa(CPUState *cs, run_on_cpu_data data)
 {
     X86CPU *cpu = X86_CPU(cs);
@@ -257,8 +293,7 @@ static void do_set_vcpu_info_default_gpa(CPUState *cs, run_on_cpu_data data)
 
     /* Changing the default does nothing if a vcpu_info was explicitly set. */
     if (env->xen_vcpu_info_gpa == INVALID_GPA) {
-        kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
-                              env->xen_vcpu_info_default_gpa);
+        set_vcpu_info(cs, env->xen_vcpu_info_default_gpa);
     }
 }
 
@@ -269,8 +304,52 @@ static void do_set_vcpu_info_gpa(CPUState *cs, run_on_cpu_data data)
 
     env->xen_vcpu_info_gpa = data.host_ulong;
 
-    kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
-                          env->xen_vcpu_info_gpa);
+    set_vcpu_info(cs, env->xen_vcpu_info_gpa);
+}
+
+void *kvm_xen_get_vcpu_info_hva(uint32_t vcpu_id)
+{
+    CPUState *cs = qemu_get_cpu(vcpu_id);
+    if (!cs) {
+        return NULL;
+    }
+
+    return X86_CPU(cs)->env.xen_vcpu_info_hva;
+}
+
+void kvm_xen_inject_vcpu_callback_vector(uint32_t vcpu_id, int type)
+{
+    CPUState *cs = qemu_get_cpu(vcpu_id);
+    uint8_t vector;
+
+    if (!cs) {
+        return;
+    }
+
+    vector = X86_CPU(cs)->env.xen_vcpu_callback_vector;
+    if (vector) {
+        /*
+         * The per-vCPU callback vector injected via lapic. Just
+         * deliver it as an MSI.
+         */
+        MSIMessage msg = {
+            .address = APIC_DEFAULT_ADDRESS | X86_CPU(cs)->apic_id,
+            .data = vector | (1UL << MSI_DATA_LEVEL_SHIFT),
+        };
+        kvm_irqchip_send_msi(kvm_state, msg);
+        return;
+    }
+
+    switch (type) {
+    case HVM_PARAM_CALLBACK_TYPE_VECTOR:
+        /*
+         * If the evtchn_upcall_pending field in the vcpu_info is set, then
+         * KVM will automatically deliver the vector on entering the vCPU
+         * so all we have to do is kick it out.
+         */
+        qemu_cpu_kick(cs);
+        break;
+    }
 }
 
 static void do_set_vcpu_time_info_gpa(CPUState *cs, run_on_cpu_data data)
@@ -306,7 +385,7 @@ static void do_vcpu_soft_reset(CPUState *cs, run_on_cpu_data data)
     env->xen_vcpu_runstate_gpa = INVALID_GPA;
     env->xen_vcpu_callback_vector = 0;
 
-    kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, INVALID_GPA);
+    set_vcpu_info(cs, INVALID_GPA);
     kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO,
                           INVALID_GPA);
     kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR,
@@ -903,7 +982,7 @@ int kvm_put_xen_state(CPUState *cs)
     }
 
     if (gpa != INVALID_GPA) {
-        ret = kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, gpa);
+        ret = set_vcpu_info(cs, gpa);
         if (ret < 0) {
             return ret;
         }
-- 
2.39.0


