qemu-devel.nongnu.org archive mirror
From: David Woodhouse <dwmw2@infradead.org>
To: qemu-devel@nongnu.org
Cc: "Paolo Bonzini" <pbonzini@redhat.com>,
	"Paul Durrant" <paul@xen.org>,
	"Joao Martins" <joao.m.martins@oracle.com>,
	"Ankur Arora" <ankur.a.arora@oracle.com>,
	"Philippe Mathieu-Daudé" <philmd@linaro.org>,
	"Thomas Huth" <thuth@redhat.com>,
	"Alex Bennée" <alex.bennee@linaro.org>,
	"Juan Quintela" <quintela@redhat.com>,
	"Dr . David Alan Gilbert" <dgilbert@redhat.com>,
	"Claudio Fontana" <cfontana@suse.de>,
	"Julien Grall" <julien@xen.org>
Subject: [PATCH v6 46/51] i386/xen: handle PV timer hypercalls
Date: Tue, 10 Jan 2023 12:20:37 +0000
Message-ID: <20230110122042.1562155-47-dwmw2@infradead.org>
In-Reply-To: <20230110122042.1562155-1-dwmw2@infradead.org>

From: Joao Martins <joao.m.martins@oracle.com>

Introduce support for the one-shot and periodic modes of Xen PV timers.
Timer interrupts are delivered through the per-vCPU VIRQ_TIMER event
channel, with deadlines set through either:

1) the set_timer_op hypercall (one-shot only), or
2) the vcpu_op hypercall's {set,stop}_{singleshot,periodic}_timer
   sub-operations.
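
As a rough guest-side sketch of the interface being emulated (the
HYPERVISOR_set_timer_op()/HYPERVISOR_vcpu_op() wrappers stand in for
the guest's usual hypercall stubs and are not part of this patch; the
structure layouts and the VCPU_SSHOTTMR_future flag come from the Xen
public headers):

    /* Illustrative only: vcpu_id is the caller's own Xen vCPU id. */
    static void arm_xen_timers_example(unsigned int vcpu_id, uint64_t now_ns)
    {
        /* One-shot: absolute deadline in ns of Xen system time. */
        HYPERVISOR_set_timer_op(now_ns + 10 * 1000 * 1000);      /* +10ms */

        /* Periodic: fires every period_ns on the calling vCPU
         * (the emulation rejects periods below 1ms). */
        struct vcpu_set_periodic_timer spt = {
            .period_ns = 10 * 1000 * 1000,
        };
        HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, vcpu_id, &spt);

        /* One-shot via vcpu_op; VCPU_SSHOTTMR_future makes a deadline
         * already in the past fail with -ETIME instead of firing
         * immediately.  Only accepted for the calling vCPU. */
        struct vcpu_set_singleshot_timer sst = {
            .timeout_abs_ns = now_ns + 10 * 1000 * 1000,
            .flags = VCPU_SSHOTTMR_future,
        };
        HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, vcpu_id, &sst);

        /* Tear both down again. */
        HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, vcpu_id, NULL);
        HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, vcpu_id, NULL);
    }

Expiry is signalled on the VIRQ_TIMER event channel the guest has
bound beforehand.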

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
---
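
Note on the implementation below: deadlines from the guest are absolute
Xen system time in nanoseconds, which the emulation reads via
KVM_GET_CLOCK, while the QEMU timer wants an absolute
QEMU_CLOCK_VIRTUAL value.  A simplified sketch of the re-basing done in
do_set_singleshot_timer():

    int64_t delta    = timeout_abs_ns - kvm_get_current_ns();   /* Xen-time delta */
    int64_t qemu_abs = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + delta;
    timer_mod_ns(env->xen_singleshot_timer, qemu_abs);

The 'Linux workaround' clamp mirrors Xen's do_set_timer_op(): 2^50 ns is
roughly 13 days (2^50 / 10^9 / 86400 ≈ 13), so a deadline further out
than that, or a negative absolute timeout, is assumed to be the result
of an overflow and the timer is re-armed 100ms in the future instead.
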
 hw/i386/kvm/xen_evtchn.c  |  31 +++++
 hw/i386/kvm/xen_evtchn.h  |   2 +
 target/i386/cpu.h         |   5 +
 target/i386/kvm/xen-emu.c | 245 +++++++++++++++++++++++++++++++++++++-
 target/i386/machine.c     |   1 +
 5 files changed, 282 insertions(+), 2 deletions(-)

diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c
index c82fac69a2..9b9a10fa43 100644
--- a/hw/i386/kvm/xen_evtchn.c
+++ b/hw/i386/kvm/xen_evtchn.c
@@ -1159,6 +1159,37 @@ int xen_evtchn_send_op(struct evtchn_send *send)
     return ret;
 }
 
+int xen_evtchn_set_port(uint16_t port)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    XenEvtchnPort *p;
+    int ret = -EINVAL;
+
+    if (!s) {
+        return -ENOTSUP;
+    }
+
+    if (!valid_port(port)) {
+        return -EINVAL;
+    }
+
+    qemu_mutex_lock(&s->port_lock);
+
+    p = &s->port_table[port];
+
+    /* QEMU has no business sending to anything but these */
+    if (p->type == EVTCHNSTAT_virq ||
+        (p->type == EVTCHNSTAT_interdomain &&
+         (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU))) {
+            set_port_pending(s, port);
+            ret = 0;
+    }
+
+    qemu_mutex_unlock(&s->port_lock);
+
+    return ret;
+}
+
 static const char *type_names[] = {
     "closed",
     "unbound",
diff --git a/hw/i386/kvm/xen_evtchn.h b/hw/i386/kvm/xen_evtchn.h
index 1f9ffc3f94..d85b45067b 100644
--- a/hw/i386/kvm/xen_evtchn.h
+++ b/hw/i386/kvm/xen_evtchn.h
@@ -20,6 +20,8 @@ int xen_evtchn_set_callback_param(uint64_t param);
 void xen_evtchn_connect_gsis(qemu_irq *system_gsis);
 void xen_evtchn_set_callback_level(int level);
 
+int xen_evtchn_set_port(uint16_t port);
+
 void hmp_xen_event_inject(Monitor *mon, const QDict *qdict);
 void hmp_xen_event_list(Monitor *mon, const QDict *qdict);
 
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index e8718c31e5..b579f0f0f8 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -26,6 +26,7 @@
 #include "exec/cpu-defs.h"
 #include "qapi/qapi-types-common.h"
 #include "qemu/cpu-float.h"
+#include "qemu/timer.h"
 
 #define XEN_NR_VIRQS 24
 
@@ -1800,6 +1801,10 @@ typedef struct CPUArchState {
     bool xen_callback_asserted;
     uint16_t xen_virq[XEN_NR_VIRQS];
     uint64_t xen_singleshot_timer_ns;
+    QEMUTimer *xen_singleshot_timer;
+    uint64_t xen_periodic_timer_period;
+    QEMUTimer *xen_periodic_timer;
+    QemuMutex xen_timers_lock;
 #endif
 #if defined(CONFIG_HVF)
     HVFX86LazyFlags hvf_lflags;
diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c
index a279d7ff89..2a7750bacb 100644
--- a/target/i386/kvm/xen-emu.c
+++ b/target/i386/kvm/xen-emu.c
@@ -37,6 +37,9 @@
 
 #include "xen-compat.h"
 
+static void xen_vcpu_singleshot_timer_event(void *opaque);
+static void xen_vcpu_periodic_timer_event(void *opaque);
+
 #ifdef TARGET_X86_64
 #define hypercall_compat32(longmode) (!(longmode))
 #else
@@ -200,6 +203,23 @@ int kvm_xen_init_vcpu(CPUState *cs)
     env->xen_vcpu_time_info_gpa = INVALID_GPA;
     env->xen_vcpu_runstate_gpa = INVALID_GPA;
 
+    qemu_mutex_init(&env->xen_timers_lock);
+    env->xen_singleshot_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+                                             xen_vcpu_singleshot_timer_event,
+                                             cpu);
+    if (!env->xen_singleshot_timer) {
+        return -ENOMEM;
+    }
+    env->xen_singleshot_timer->opaque = cs;
+
+    env->xen_periodic_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+                                           xen_vcpu_periodic_timer_event,
+                                           cpu);
+    if (!env->xen_periodic_timer) {
+        return -ENOMEM;
+    }
+    env->xen_periodic_timer->opaque = cs;
+
     return 0;
 }
 
@@ -231,7 +251,8 @@ static bool kvm_xen_hcall_xen_version(struct kvm_xen_exit *exit, X86CPU *cpu,
                          1 << XENFEAT_writable_descriptor_tables |
                          1 << XENFEAT_auto_translated_physmap |
                          1 << XENFEAT_supervisor_mode_kernel |
-                         1 << XENFEAT_hvm_callback_vector;
+                         1 << XENFEAT_hvm_callback_vector |
+                         1 << XENFEAT_hvm_safe_pvclock;
         }
 
         err = kvm_copy_to_gva(CPU(cpu), arg, &fi, sizeof(fi));
@@ -848,13 +869,192 @@ static int vcpuop_register_runstate_info(CPUState *cs, CPUState *target,
     return 0;
 }
 
+static uint64_t kvm_get_current_ns(void)
+{
+    struct kvm_clock_data data;
+    int ret;
+
+    ret = kvm_vm_ioctl(kvm_state, KVM_GET_CLOCK, &data);
+    if (ret < 0) {
+        fprintf(stderr, "KVM_GET_CLOCK failed: %s\n", strerror(ret));
+                abort();
+    }
+
+    return data.clock;
+}
+
+static void xen_vcpu_singleshot_timer_event(void *opaque)
+{
+    CPUState *cpu = opaque;
+    CPUX86State *env = &X86_CPU(cpu)->env;
+    uint16_t port = env->xen_virq[VIRQ_TIMER];
+
+    if (likely(port)) {
+        xen_evtchn_set_port(port);
+    }
+
+    qemu_mutex_lock(&env->xen_timers_lock);
+    env->xen_singleshot_timer_ns = 0;
+    qemu_mutex_unlock(&env->xen_timers_lock);
+}
+
+static void xen_vcpu_periodic_timer_event(void *opaque)
+{
+    CPUState *cpu = opaque;
+    CPUX86State *env = &X86_CPU(cpu)->env;
+    uint16_t port = env->xen_virq[VIRQ_TIMER];
+    int64_t qemu_now;
+
+    if (likely(port)) {
+        xen_evtchn_set_port(port);
+    }
+
+    qemu_mutex_lock(&env->xen_timers_lock);
+
+    qemu_now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+    timer_mod_ns(env->xen_periodic_timer,
+                 qemu_now + env->xen_periodic_timer_period);
+
+    qemu_mutex_unlock(&env->xen_timers_lock);
+}
+
+static int do_set_periodic_timer(CPUState *target, uint64_t period_ns)
+{
+    CPUX86State *tenv = &X86_CPU(target)->env;
+    int64_t qemu_now;
+
+    timer_del(tenv->xen_periodic_timer);
+
+    qemu_mutex_lock(&tenv->xen_timers_lock);
+
+    qemu_now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+    timer_mod_ns(tenv->xen_periodic_timer, qemu_now + period_ns);
+    tenv->xen_periodic_timer_period = period_ns;
+
+    qemu_mutex_unlock(&tenv->xen_timers_lock);
+    return 0;
+}
+
+#define MILLISECS(_ms)  ((int64_t)((_ms) * 1000000ULL))
+#define MICROSECS(_us)  ((int64_t)((_us) * 1000ULL))
+#define STIME_MAX ((time_t)((int64_t)~0ull >> 1))
+/* Chosen so (NOW() + delta) wont overflow without an uptime of 200 years */
+#define STIME_DELTA_MAX ((int64_t)((uint64_t)~0ull >> 2))
+
+static int vcpuop_set_periodic_timer(CPUState *cs, CPUState *target,
+                                     uint64_t arg)
+{
+    struct vcpu_set_periodic_timer spt;
+
+    qemu_build_assert(sizeof(spt) == 8);
+    if (kvm_copy_from_gva(cs, arg, &spt, sizeof(spt))) {
+        return -EFAULT;
+    }
+
+    if (spt.period_ns < MILLISECS(1) || spt.period_ns > STIME_DELTA_MAX) {
+        return -EINVAL;
+    }
+
+    return do_set_periodic_timer(target, spt.period_ns);
+}
+
+static int vcpuop_stop_periodic_timer(CPUState *target)
+{
+    CPUX86State *tenv = &X86_CPU(target)->env;
+
+    qemu_mutex_lock(&tenv->xen_timers_lock);
+
+    timer_del(tenv->xen_periodic_timer);
+    tenv->xen_periodic_timer_period = 0;
+
+    qemu_mutex_unlock(&tenv->xen_timers_lock);
+    return 0;
+}
+
+static int do_set_singleshot_timer(CPUState *cs, uint64_t timeout_abs,
+                                   bool future, bool linux_wa)
+{
+    CPUX86State *env = &X86_CPU(cs)->env;
+    int64_t now = kvm_get_current_ns();
+    int64_t qemu_now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+    int64_t delta = timeout_abs - now;
+
+    if (future && timeout_abs < now) {
+        return -ETIME;
+    }
+
+    if (linux_wa && unlikely((int64_t)timeout_abs < 0 ||
+                             (delta > 0 && (uint32_t)(delta >> 50) != 0))) {
+        /*
+         * Xen has a 'Linux workaround' in do_set_timer_op() which checks
+         * for negative absolute timeout values (caused by integer
+         * overflow), and for values about 13 days in the future (2^50ns)
+         * which would be caused by jiffies overflow. For those cases, it
+         * sets the timeout 100ms in the future (not *too* soon, since if
+         * a guest really did set a long timeout on purpose we don't want
+         * to keep churning CPU time by waking it up).
+         */
+        delta = (100 * SCALE_MS);
+        timeout_abs = now + delta;
+    }
+
+    qemu_mutex_lock(&env->xen_timers_lock);
+
+    timer_mod_ns(env->xen_singleshot_timer, qemu_now + delta);
+    env->xen_singleshot_timer_ns = now + delta;
+
+    qemu_mutex_unlock(&env->xen_timers_lock);
+    return 0;
+}
+
+static int vcpuop_set_singleshot_timer(CPUState *cs, uint64_t arg)
+{
+    struct vcpu_set_singleshot_timer sst;
+
+    qemu_build_assert(sizeof(sst) == 16);
+    if (kvm_copy_from_gva(cs, arg, &sst, sizeof(sst))) {
+        return -EFAULT;
+    }
+
+    return do_set_singleshot_timer(cs, sst.timeout_abs_ns,
+                                   !!(sst.flags & VCPU_SSHOTTMR_future),
+                                   false);
+}
+
+static int vcpuop_stop_singleshot_timer(CPUState *cs)
+{
+    CPUX86State *env = &X86_CPU(cs)->env;
+
+    qemu_mutex_lock(&env->xen_timers_lock);
+
+    timer_del(env->xen_singleshot_timer);
+    env->xen_singleshot_timer_ns = 0;
+
+    qemu_mutex_unlock(&env->xen_timers_lock);
+    return 0;
+}
+
+static int kvm_xen_hcall_set_timer_op(struct kvm_xen_exit *exit, X86CPU *cpu,
+                                      uint64_t timeout)
+{
+    if (unlikely(timeout == 0)) {
+        return vcpuop_stop_singleshot_timer(CPU(cpu));
+    } else {
+        return do_set_singleshot_timer(CPU(cpu), timeout, false, true);
+    }
+}
+
 static bool kvm_xen_hcall_vcpu_op(struct kvm_xen_exit *exit, X86CPU *cpu,
                                   int cmd, int vcpu_id, uint64_t arg)
 {
-    CPUState *dest = qemu_get_cpu(vcpu_id);
     CPUState *cs = CPU(cpu);
+    CPUState *dest = cs->cpu_index == vcpu_id ? cs : qemu_get_cpu(vcpu_id);
     int err;
 
+    if (!dest) {
+        return -ENOENT;
+    }
+
     switch (cmd) {
     case VCPUOP_register_runstate_memory_area:
         err = vcpuop_register_runstate_info(cs, dest, arg);
@@ -865,6 +1065,26 @@ static bool kvm_xen_hcall_vcpu_op(struct kvm_xen_exit *exit, X86CPU *cpu,
     case VCPUOP_register_vcpu_info:
         err = vcpuop_register_vcpu_info(cs, dest, arg);
         break;
+    case VCPUOP_set_singleshot_timer: {
+        if (cs->cpu_index != vcpu_id) {
+            return -EINVAL;
+        }
+        err = vcpuop_set_singleshot_timer(dest, arg);
+        break;
+    }
+    case VCPUOP_stop_singleshot_timer:
+        if (cs->cpu_index != vcpu_id) {
+            return -EINVAL;
+        }
+        err = vcpuop_stop_singleshot_timer(dest);
+        break;
+    case VCPUOP_set_periodic_timer: {
+        err = vcpuop_set_periodic_timer(cs, dest, arg);
+        break;
+    }
+    case VCPUOP_stop_periodic_timer:
+        err = vcpuop_stop_periodic_timer(dest);
+        break;
 
     default:
         return false;
@@ -1204,6 +1424,9 @@ static bool do_kvm_xen_handle_exit(X86CPU *cpu, struct kvm_xen_exit *exit)
     }
 
     switch (code) {
+    case __HYPERVISOR_set_timer_op:
+        return kvm_xen_hcall_set_timer_op(exit, cpu,
+                                          exit->u.hcall.params[0]);
     case __HYPERVISOR_grant_table_op:
         return kvm_xen_hcall_gnttab_op(exit, cpu, exit->u.hcall.params[0],
                                        exit->u.hcall.params[1],
@@ -1313,7 +1536,25 @@ int kvm_put_xen_state(CPUState *cs)
         }
     }
 
+    if (env->xen_periodic_timer_period) {
+        ret = do_set_periodic_timer(cs, env->xen_periodic_timer_period);
+        if (ret < 0) {
+            return ret;
+        }
+    }
+
     if (!kvm_xen_has_cap(EVTCHN_SEND)) {
+        /*
+         * If the kernel has EVTCHN_SEND support then it handles timers too,
+         * so the timer will be restored by kvm_xen_set_vcpu_timer() below.
+         */
+        if (env->xen_singleshot_timer_ns) {
+            ret = do_set_singleshot_timer(cs, env->xen_singleshot_timer_ns,
+                                    false, false);
+            if (ret < 0) {
+                return ret;
+            }
+        }
         return 0;
     }
 
diff --git a/target/i386/machine.c b/target/i386/machine.c
index 603a1077e3..c7ac8084b2 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -1277,6 +1277,7 @@ static const VMStateDescription vmstate_xen_vcpu = {
         VMSTATE_UINT8(env.xen_vcpu_callback_vector, X86CPU),
         VMSTATE_UINT16_ARRAY(env.xen_virq, X86CPU, XEN_NR_VIRQS),
         VMSTATE_UINT64(env.xen_singleshot_timer_ns, X86CPU),
+        VMSTATE_UINT64(env.xen_periodic_timer_period, X86CPU),
         VMSTATE_END_OF_LIST()
     }
 };
-- 
2.35.3



Thread overview: 94+ messages
2023-01-10 12:19 [PATCH v6 00/51] Xen support under KVM David Woodhouse
2023-01-10 12:19 ` [PATCH v6 01/51] include: import Xen public headers to include/standard-headers/ David Woodhouse
2023-01-10 12:19 ` [PATCH v6 02/51] xen: add CONFIG_XENFV_MACHINE and CONFIG_XEN_EMU options for Xen emulation David Woodhouse
2023-01-10 12:19 ` [PATCH v6 03/51] xen: Add XEN_DISABLED mode and make it default David Woodhouse
2023-01-10 12:19 ` [PATCH v6 04/51] i386/kvm: Add xen-version KVM accelerator property and init KVM Xen support David Woodhouse
2023-01-10 12:19 ` [PATCH v6 05/51] i386/kvm: handle Xen HVM cpuid leaves David Woodhouse
2023-01-10 12:19 ` [PATCH v6 06/51] i386/hvm: Set Xen vCPU ID in KVM David Woodhouse
2023-01-10 12:19 ` [PATCH v6 07/51] xen-platform: exclude vfio-pci from the PCI platform unplug David Woodhouse
2023-01-10 12:19 ` [PATCH v6 08/51] xen-platform: allow its creation with XEN_EMULATE mode David Woodhouse
2023-01-16 16:20   ` Paul Durrant
2023-01-16 17:56     ` David Woodhouse
2023-01-10 12:20 ` [PATCH v6 09/51] i386/xen: handle guest hypercalls David Woodhouse
2023-01-16 16:24   ` Paul Durrant
2023-01-16 17:57     ` David Woodhouse
2023-01-10 12:20 ` [PATCH v6 10/51] i386/xen: implement HYPERVISOR_xen_version David Woodhouse
2023-01-10 12:20 ` [PATCH v6 11/51] i386/xen: implement HYPERVISOR_sched_op, SCHEDOP_shutdown David Woodhouse
2023-01-16 16:27   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 12/51] i386/xen: Implement SCHEDOP_poll and SCHEDOP_yield David Woodhouse
2023-01-16 16:36   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 13/51] hw/xen: Add xen_overlay device for emulating shared xenheap pages David Woodhouse
2023-01-16 16:57   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 14/51] i386/xen: add pc_machine_kvm_type to initialize XEN_EMULATE mode David Woodhouse
2023-01-16 17:17   ` Paul Durrant
2023-01-16 19:45     ` David Woodhouse
2023-01-10 12:20 ` [PATCH v6 15/51] i386/xen: manage and save/restore Xen guest long_mode setting David Woodhouse
2023-01-16 17:20   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 16/51] i386/xen: implement HYPERVISOR_memory_op David Woodhouse
2023-01-16 17:28   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 17/51] i386/xen: implement XENMEM_add_to_physmap_batch David Woodhouse
2023-01-16 17:36   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 18/51] i386/xen: implement HYPERVISOR_hvm_op David Woodhouse
2023-01-16 17:39   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 19/51] i386/xen: implement HYPERVISOR_vcpu_op David Woodhouse
2023-01-16 17:40   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 20/51] i386/xen: handle VCPUOP_register_vcpu_info David Woodhouse
2023-01-16 17:46   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 21/51] i386/xen: handle VCPUOP_register_vcpu_time_info David Woodhouse
2023-01-16 17:53   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 22/51] i386/xen: handle VCPUOP_register_runstate_memory_area David Woodhouse
2023-01-16 17:56   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 23/51] i386/xen: implement HYPERVISOR_event_channel_op David Woodhouse
2023-01-16 17:59   ` Paul Durrant
2023-01-16 19:54     ` David Woodhouse
2023-01-10 12:20 ` [PATCH v6 24/51] i386/xen: implement HVMOP_set_evtchn_upcall_vector David Woodhouse
2023-01-10 12:20 ` [PATCH v6 25/51] i386/xen: implement HVMOP_set_param David Woodhouse
2023-01-16 18:00   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 26/51] hw/xen: Add xen_evtchn device for event channel emulation David Woodhouse
2023-01-10 12:20 ` [PATCH v6 27/51] i386/xen: Add support for Xen event channel delivery to vCPU David Woodhouse
2023-01-10 12:20 ` [PATCH v6 28/51] hw/xen: Implement EVTCHNOP_status David Woodhouse
2023-01-10 12:20 ` [PATCH v6 29/51] hw/xen: Implement EVTCHNOP_close David Woodhouse
2023-01-10 12:20 ` [PATCH v6 30/51] hw/xen: Implement EVTCHNOP_unmask David Woodhouse
2023-01-10 12:20 ` [PATCH v6 31/51] hw/xen: Implement EVTCHNOP_bind_virq David Woodhouse
2023-01-10 12:20 ` [PATCH v6 32/51] hw/xen: Implement EVTCHNOP_bind_ipi David Woodhouse
2023-01-10 12:20 ` [PATCH v6 33/51] hw/xen: Implement EVTCHNOP_send David Woodhouse
2023-01-10 12:20 ` [PATCH v6 34/51] hw/xen: Implement EVTCHNOP_alloc_unbound David Woodhouse
2023-01-10 12:20 ` [PATCH v6 35/51] hw/xen: Implement EVTCHNOP_bind_interdomain David Woodhouse
2023-01-10 12:20 ` [PATCH v6 36/51] hw/xen: Implement EVTCHNOP_bind_vcpu David Woodhouse
2023-01-10 12:20 ` [PATCH v6 37/51] hw/xen: Implement EVTCHNOP_reset David Woodhouse
2023-01-10 12:20 ` [PATCH v6 38/51] i386/xen: add monitor commands to test event injection David Woodhouse
2023-01-11 14:28   ` Dr. David Alan Gilbert
2023-01-11 14:57     ` David Woodhouse
2023-01-10 12:20 ` [PATCH v6 39/51] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_GSI callback David Woodhouse
2023-01-10 12:20 ` [PATCH v6 40/51] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_PCI_INTX callback David Woodhouse
2023-01-10 12:20 ` [PATCH v6 41/51] kvm/i386: Add xen-gnttab-max-frames property David Woodhouse
2023-01-10 12:20 ` [PATCH v6 42/51] hw/xen: Add xen_gnttab device for grant table emulation David Woodhouse
2023-01-10 12:20 ` [PATCH v6 43/51] hw/xen: Support mapping grant frames David Woodhouse
2023-01-10 12:20 ` [PATCH v6 44/51] i386/xen: Implement HYPERVISOR_grant_table_op and GNTTABOP_[gs]et_verson David Woodhouse
2023-01-10 12:20 ` [PATCH v6 45/51] hw/xen: Implement GNTTABOP_query_size David Woodhouse
2023-01-10 12:20 ` [PATCH v6 46/51] i386/xen: handle PV timer hypercalls David Woodhouse [this message]
2023-01-10 12:20 ` [PATCH v6 47/51] i386/xen: Reserve Xen special pages for console, xenstore rings David Woodhouse
2023-01-10 12:20 ` [PATCH v6 48/51] i386/xen: handle HVMOP_get_param David Woodhouse
2023-01-10 12:20 ` [PATCH v6 49/51] hw/xen: Add backend implementation of interdomain event channel support David Woodhouse
2023-01-10 12:20 ` [PATCH v6 50/51] hw/xen: Add xen_xenstore device for xenstore emulation David Woodhouse
2023-01-10 12:20 ` [PATCH v6 51/51] hw/xen: Add basic ring handling to xenstore David Woodhouse
2023-01-10 12:37 ` [RFC PATCH v1 00/15] Xen PV backend support for KVM/Xen guests David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 01/15] hw/xen: Add evtchn operations to allow redirection to internal emulation David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 02/15] hw/xen: Add emulated evtchn ops David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 03/15] hw/xen: Add gnttab operations to allow redirection to internal emulation David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 04/15] hw/xen: Pass grant ref to gnttab unmap David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 05/15] hw/xen: Add foreignmem operations to allow redirection to internal emulation David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 06/15] hw/xen: Add xenstore " David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 07/15] hw/xen: Move xenstore_store_pv_console_info to xen_console.c David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 08/15] hw/xen: Use XEN_PAGE_SIZE in PV backend drivers David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 09/15] hw/xen: Rename xen_common.h to xen_native.h David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 10/15] hw/xen: Build PV backend drivers for XENFV_MACHINE David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 11/15] hw/xen: Map guest XENSTORE_PFN grant in emulated Xenstore David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 12/15] hw/xen: Add backend implementation of grant table operations David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 13/15] hw/xen: Implement soft reset for emulated gnttab David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 14/15] hw/xen: Remove old version of Xen headers David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 15/15] i386/xen: Initialize XenBus and legacy backends from pc_init1() David Woodhouse
2023-01-10 15:43   ` [RFC PATCH v1 00/15] Xen PV backend support for KVM/Xen guests Joao Martins
2023-01-10 15:47     ` Joao Martins
2023-01-10 16:52     ` David Woodhouse
2023-01-10 17:26       ` Joao Martins
