xen-devel.lists.xenproject.org archive mirror
 help / color / mirror / Atom feed
From: Wei Liu <wei.liu2@citrix.com>
To: Xen-devel <xen-devel@lists.xenproject.org>
Cc: wei.liu2@citrix.com
Subject: [PATCH RFC v1 55/74] xen/pvshim: forward evtchn ops between L0 Xen and L2 DomU
Date: Thu, 4 Jan 2018 13:06:06 +0000	[thread overview]
Message-ID: <20180104130625.28605-56-wei.liu2@citrix.com> (raw)
In-Reply-To: <20180104130625.28605-1-wei.liu2@citrix.com>

From: Roger Pau Monne <roger.pau@citrix.com>

Note that the unmask and the virq operations are handled by the shim
itself, and that FIFO event channels are not exposed to the guest.

Signed-off-by: Anthony Liguori <aliguori@amazon.com>
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
---
 xen/arch/x86/guest/xen.c          |  25 +++-
 xen/arch/x86/pv/shim.c            | 259 ++++++++++++++++++++++++++++++++++++++
 xen/common/domain.c               |   7 ++
 xen/common/event_channel.c        | 100 +++++++++------
 xen/drivers/char/xen_pv_console.c |  11 +-
 xen/include/asm-x86/pv/shim.h     |  12 ++
 xen/include/xen/event.h           |  15 +++
 xen/include/xen/pv_console.h      |   6 +
 xen/include/xen/sched.h           |   2 +
 9 files changed, 394 insertions(+), 43 deletions(-)

diff --git a/xen/arch/x86/guest/xen.c b/xen/arch/x86/guest/xen.c
index a9de20708c..653a7366ab 100644
--- a/xen/arch/x86/guest/xen.c
+++ b/xen/arch/x86/guest/xen.c
@@ -18,6 +18,7 @@
  *
  * Copyright (c) 2017 Citrix Systems Ltd.
  */
+#include <xen/event.h>
 #include <xen/init.h>
 #include <xen/types.h>
 #include <xen/pv_console.h>
@@ -155,11 +156,31 @@ static void set_vcpu_id(void)
 static void xen_evtchn_upcall(struct cpu_user_regs *regs)
 {
     struct vcpu_info *vcpu_info = this_cpu(vcpu_info);
+    unsigned long pending;
 
     vcpu_info->evtchn_upcall_pending = 0;
-    xchg(&vcpu_info->evtchn_pending_sel, 0);
+    pending = xchg(&vcpu_info->evtchn_pending_sel, 0);
 
-    pv_console_rx(regs);
+    while ( pending )
+    {
+        unsigned int l1 = ffsl(pending) - 1;
+        unsigned long evtchn = xchg(&XEN_shared_info->evtchn_pending[l1], 0);
+
+        __clear_bit(l1, &pending);
+        evtchn &= ~XEN_shared_info->evtchn_mask[l1];
+        while ( evtchn )
+        {
+            unsigned int port = ffsl(evtchn) - 1;
+
+            __clear_bit(port, &evtchn);
+            port += l1 * BITS_PER_LONG;
+
+            if ( pv_console && port == pv_console_evtchn() )
+                pv_console_rx(regs);
+            else if ( pv_shim )
+                pv_shim_inject_evtchn(port);
+        }
+    }
 
     ack_APIC_irq();
 }
diff --git a/xen/arch/x86/pv/shim.c b/xen/arch/x86/pv/shim.c
index d318f07d08..69482993f9 100644
--- a/xen/arch/x86/pv/shim.c
+++ b/xen/arch/x86/pv/shim.c
@@ -18,6 +18,8 @@
  *
  * Copyright (c) 2017 Citrix Systems Ltd.
  */
+#include <xen/event.h>
+#include <xen/guest_access.h>
 #include <xen/hypercall.h>
 #include <xen/init.h>
 #include <xen/shutdown.h>
@@ -63,6 +65,31 @@ static void __init replace_va(struct domain *d, l4_pgentry_t *l4start,
                                                       : COMPAT_L1_PROT));
 }
 
+static void evtchn_reserve(struct domain *d, unsigned int port)
+{
+    struct evtchn_unmask unmask = {
+        .port = port,
+    };
+
+    ASSERT(port_is_valid(d, port));
+    evtchn_from_port(d, port)->state = ECS_RESERVED;
+    BUG_ON(xen_hypercall_event_channel_op(EVTCHNOP_unmask, &unmask));
+}
+
+static bool evtchn_handled(struct domain *d, unsigned int port)
+{
+    ASSERT(port_is_valid(d, port));
+    /* The shim manages VIRQs, the rest is forwarded to L0. */
+    return evtchn_from_port(d, port)->state == ECS_VIRQ;
+}
+
+static void evtchn_assign_vcpu(struct domain *d, unsigned int port,
+                               unsigned int vcpu)
+{
+    ASSERT(port_is_valid(d, port));
+    evtchn_from_port(d, port)->notify_vcpu_id = vcpu;
+}
+
 void __init pv_shim_setup_dom(struct domain *d, l4_pgentry_t *l4start,
                               unsigned long va_start, unsigned long store_va,
                               unsigned long console_va, unsigned long vphysmap,
@@ -83,6 +110,11 @@ void __init pv_shim_setup_dom(struct domain *d, l4_pgentry_t *l4start,
         replace_va(d, l4start, va, param);                                     \
         dom0_update_physmap(d, (va - va_start) >> PAGE_SHIFT, param, vphysmap);\
     }                                                                          \
+    else                                                                       \
+    {                                                                          \
+        BUG_ON(evtchn_allocate_port(d, param));                                \
+        evtchn_reserve(d, param);                                              \
+    }                                                                          \
 })
     SET_AND_MAP_PARAM(HVM_PARAM_STORE_PFN, si->store_mfn, store_va);
     SET_AND_MAP_PARAM(HVM_PARAM_STORE_EVTCHN, si->store_evtchn, 0);
@@ -101,6 +133,233 @@ void pv_shim_shutdown(uint8_t reason)
     xen_hypercall_shutdown(reason);
 }
 
+/*
+ * Dispatch an event channel hypercall from the L1 guest: VIRQ and unmask are
+ * handled locally by the shim, everything else is forwarded to L0 Xen and
+ * mirrored into the shim's own port space.
+ */
+long pv_shim_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
+{
+    struct domain *d = current->domain;
+    long rc;
+
+    switch ( cmd )
+    {
+#define EVTCHN_FORWARD(cmd, port_field)                                 \
+case EVTCHNOP_##cmd: {                                                  \
+    struct evtchn_##cmd op;                                             \
+                                                                        \
+    if ( copy_from_guest(&op, arg, 1) != 0 )                            \
+        return -EFAULT;                                                 \
+                                                                        \
+    rc = xen_hypercall_event_channel_op(EVTCHNOP_##cmd, &op);           \
+    if ( rc )                                                           \
+        break;                                                          \
+                                                                        \
+    spin_lock(&d->event_lock);                                          \
+    rc = evtchn_allocate_port(d, op.port_field);                        \
+    if ( rc )                                                           \
+    {                                                                   \
+        struct evtchn_close close = {                                   \
+            .port = op.port_field,                                      \
+        };                                                              \
+                                                                        \
+        BUG_ON(xen_hypercall_event_channel_op(EVTCHNOP_close, &close)); \
+    }                                                                   \
+    else                                                                \
+        evtchn_reserve(d, op.port_field);                               \
+    spin_unlock(&d->event_lock);                                        \
+                                                                        \
+    if ( !rc && __copy_to_guest(arg, &op, 1) )                          \
+        rc = -EFAULT;                                                   \
+                                                                        \
+    break;                                                              \
+    }
+    EVTCHN_FORWARD(alloc_unbound, port)
+    EVTCHN_FORWARD(bind_interdomain, local_port)
+#undef EVTCHN_FORWARD
+
+    case EVTCHNOP_bind_virq: {
+        struct evtchn_bind_virq virq;
+        struct evtchn_alloc_unbound alloc = {
+            .dom = DOMID_SELF,
+            .remote_dom = DOMID_SELF,
+        };
+
+        if ( copy_from_guest(&virq, arg, 1) != 0 )
+            return -EFAULT;
+        /*
+         * The event channel space is actually controlled by L0 Xen, so
+         * allocate a port from L0 and then force the VIRQ to be bound to that
+         * specific port.
+         *
+         * This is only required for VIRQ because the rest of the event channel
+         * operations are handled directly by L0.
+         */
+        rc = xen_hypercall_event_channel_op(EVTCHNOP_alloc_unbound, &alloc);
+        if ( rc )
+            break;
+
+        /* Force L1 to use the event channel port allocated on L0. */
+        rc = evtchn_bind_virq(&virq, alloc.port);
+        if ( rc )
+        {
+            /* Return the unused port to L0 to avoid leaking it. */
+            struct evtchn_close free = {
+                .port = alloc.port,
+            };
+
+            xen_hypercall_event_channel_op(EVTCHNOP_close, &free);
+        }
+
+        if ( !rc && __copy_to_guest(arg, &virq, 1) )
+            rc = -EFAULT;
+
+        break;
+    }
+    case EVTCHNOP_status: {
+        struct evtchn_status status;
+
+        if ( copy_from_guest(&status, arg, 1) != 0 )
+            return -EFAULT;
+
+        if ( port_is_valid(d, status.port) && evtchn_handled(d, status.port) )
+            rc = evtchn_status(&status);
+        else
+            rc = xen_hypercall_event_channel_op(EVTCHNOP_status, &status);
+
+        break;
+    }
+    case EVTCHNOP_bind_vcpu: {
+        struct evtchn_bind_vcpu vcpu;
+
+        if ( copy_from_guest(&vcpu, arg, 1) != 0 )
+            return -EFAULT;
+
+        if ( !port_is_valid(d, vcpu.port) )
+            return -EINVAL;
+
+        if ( evtchn_handled(d, vcpu.port) )
+            rc = evtchn_bind_vcpu(vcpu.port, vcpu.vcpu);
+        else
+        {
+            rc = xen_hypercall_event_channel_op(EVTCHNOP_bind_vcpu, &vcpu);
+            if ( !rc )
+                /* Mirror the L0 binding into the shim's port bookkeeping. */
+                evtchn_assign_vcpu(d, vcpu.port, vcpu.vcpu);
+        }
+
+        break;
+    }
+    case EVTCHNOP_close: {
+        struct evtchn_close close;
+
+        if ( copy_from_guest(&close, arg, 1) != 0 )
+            return -EFAULT;
+
+        if ( !port_is_valid(d, close.port) )
+            return -EINVAL;
+
+        if ( evtchn_handled(d, close.port) )
+        {
+            rc = evtchn_close(d, close.port, true);
+            if ( rc )
+                break;
+        }
+        else
+            evtchn_free(d, evtchn_from_port(d, close.port));
+
+        rc = xen_hypercall_event_channel_op(EVTCHNOP_close, &close);
+        if ( rc )
+            /*
+             * If the port cannot be closed on L0, mark it as reserved
+             * in the shim to avoid re-using it.
+             */
+            evtchn_reserve(d, close.port);
+
+        set_bit(close.port, XEN_shared_info->evtchn_mask);
+
+        break;
+    }
+    case EVTCHNOP_bind_ipi: {
+        struct evtchn_bind_ipi ipi;
+
+        if ( copy_from_guest(&ipi, arg, 1) != 0 )
+            return -EFAULT;
+
+        rc = xen_hypercall_event_channel_op(EVTCHNOP_bind_ipi, &ipi);
+        if ( rc )
+            break;
+
+        spin_lock(&d->event_lock);
+        rc = evtchn_allocate_port(d, ipi.port);
+        if ( rc )
+        {
+            struct evtchn_close close = {
+                .port = ipi.port,
+            };
+
+            /* Don't leave the switch with the event lock still held. */
+            spin_unlock(&d->event_lock);
+
+            /*
+             * If closing the event channel port also fails, there's not
+             * much the shim can do, since it has been unable to reserve
+             * the port in its event channel space.
+             */
+            BUG_ON(xen_hypercall_event_channel_op(EVTCHNOP_close, &close));
+            break;
+        }
+
+        evtchn_assign_vcpu(d, ipi.port, ipi.vcpu);
+        evtchn_reserve(d, ipi.port);
+        spin_unlock(&d->event_lock);
+
+        if ( __copy_to_guest(arg, &ipi, 1) )
+            rc = -EFAULT;
+
+        break;
+    }
+    case EVTCHNOP_unmask: {
+        struct evtchn_unmask unmask;
+
+        if ( copy_from_guest(&unmask, arg, 1) != 0 )
+            return -EFAULT;
+
+        /* Unmask is handled in L1 */
+        rc = evtchn_unmask(unmask.port);
+
+        break;
+    }
+    case EVTCHNOP_send: {
+        struct evtchn_send send;
+
+        if ( copy_from_guest(&send, arg, 1) != 0 )
+            return -EFAULT;
+
+        rc = xen_hypercall_event_channel_op(EVTCHNOP_send, &send);
+
+        break;
+    }
+    case EVTCHNOP_reset: {
+        struct evtchn_reset reset;
+
+        if ( copy_from_guest(&reset, arg, 1) != 0 )
+            return -EFAULT;
+
+        rc = xen_hypercall_event_channel_op(EVTCHNOP_reset, &reset);
+
+        break;
+    }
+    default:
+        /* No FIFO or PIRQ support for now */
+        rc = -ENOSYS;
+        break;
+    }
+
+    return rc;
+}
+
+/* Forward an event received from L0 to the shim's guest (port in L1 space). */
+void pv_shim_inject_evtchn(unsigned int port)
+{
+    if ( port_is_valid(pv_domain, port) )
+    {
+        struct evtchn *chn = evtchn_from_port(pv_domain, port);
+
+        evtchn_port_set_pending(pv_domain, chn->notify_vcpu_id, chn);
+    }
+}
+
 domid_t get_dom0_domid(void)
 {
     uint32_t eax, ebx, ecx, edx;
diff --git a/xen/common/domain.c b/xen/common/domain.c
index edbf1a2ba9..d653a0b0bb 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -63,6 +63,8 @@ struct domain *domain_list;
 
 struct domain *hardware_domain __read_mostly;
 
+struct domain *pv_domain __read_mostly;
+
 #ifdef CONFIG_LATE_HWDOM
 domid_t hardware_domid __read_mostly;
 integer_param("hardware_dom", hardware_domid);
@@ -395,6 +397,11 @@ struct domain *domain_create(domid_t domid, unsigned int domcr_flags,
         rcu_assign_pointer(*pd, d);
         rcu_assign_pointer(domain_hash[DOMAIN_HASH(domid)], d);
         spin_unlock(&domlist_update_lock);
+
+#ifdef CONFIG_X86
+        if ( pv_shim )
+            pv_domain = d;
+#endif
     }
 
     return d;
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index c69f9db6db..977a876751 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -31,6 +31,10 @@
 #include <public/event_channel.h>
 #include <xsm/xsm.h>
 
+#ifdef CONFIG_X86
+#include <asm/pv/shim.h>
+#endif
+
 #define ERROR_EXIT(_errno)                                          \
     do {                                                            \
         gdprintk(XENLOG_WARNING,                                    \
@@ -156,46 +160,62 @@ static void free_evtchn_bucket(struct domain *d, struct evtchn *bucket)
     xfree(bucket);
 }
 
+int evtchn_allocate_port(struct domain *d, unsigned int port)
+{
+    if ( port > d->max_evtchn_port || port >= d->max_evtchns )
+        return -ENOSPC;
+
+    if ( port_is_valid(d, port) )
+    {
+        if ( evtchn_from_port(d, port)->state != ECS_FREE ||
+             evtchn_port_is_busy(d, port) )
+            return -EBUSY;
+    }
+    else
+    {
+        struct evtchn *chn;
+        struct evtchn **grp;
+
+        if ( !group_from_port(d, port) )
+        {
+            grp = xzalloc_array(struct evtchn *, BUCKETS_PER_GROUP);
+            if ( !grp )
+                return -ENOMEM;
+            group_from_port(d, port) = grp;
+        }
+
+        chn = alloc_evtchn_bucket(d, port);
+        if ( !chn )
+            return -ENOMEM;
+        bucket_from_port(d, port) = chn;
+
+        write_atomic(&d->valid_evtchns, d->valid_evtchns + EVTCHNS_PER_BUCKET);
+    }
+
+    return 0;
+}
+
 static int get_free_port(struct domain *d)
 {
-    struct evtchn *chn;
-    struct evtchn **grp;
     int            port;
 
     if ( d->is_dying )
         return -EINVAL;
 
-    for ( port = 0; port_is_valid(d, port); port++ )
+    for ( port = 0; port <= d->max_evtchn_port; port++ )
     {
-        if ( port > d->max_evtchn_port )
-            return -ENOSPC;
-        if ( evtchn_from_port(d, port)->state == ECS_FREE
-             && !evtchn_port_is_busy(d, port) )
-            return port;
-    }
+        int rc = evtchn_allocate_port(d, port);
 
-    if ( port == d->max_evtchns || port > d->max_evtchn_port )
-        return -ENOSPC;
+        if ( rc == -EBUSY )
+            continue;
 
-    if ( !group_from_port(d, port) )
-    {
-        grp = xzalloc_array(struct evtchn *, BUCKETS_PER_GROUP);
-        if ( !grp )
-            return -ENOMEM;
-        group_from_port(d, port) = grp;
+        return port;
     }
 
-    chn = alloc_evtchn_bucket(d, port);
-    if ( !chn )
-        return -ENOMEM;
-    bucket_from_port(d, port) = chn;
-
-    write_atomic(&d->valid_evtchns, d->valid_evtchns + EVTCHNS_PER_BUCKET);
-
-    return port;
+    return -ENOSPC;
 }
 
-static void free_evtchn(struct domain *d, struct evtchn *chn)
+void evtchn_free(struct domain *d, struct evtchn *chn)
 {
     /* Clear pending event to avoid unexpected behavior on re-bind. */
     evtchn_port_clear_pending(d, chn);
@@ -345,13 +365,13 @@ static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
 }
 
 
-static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
+int evtchn_bind_virq(evtchn_bind_virq_t *bind, int port)
 {
     struct evtchn *chn;
     struct vcpu   *v;
     struct domain *d = current->domain;
-    int            port, virq = bind->virq, vcpu = bind->vcpu;
-    long           rc = 0;
+    int            virq = bind->virq, vcpu = bind->vcpu;
+    int            rc = 0;
 
     if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) )
         return -EINVAL;
@@ -368,7 +388,12 @@ static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
     if ( v->virq_to_evtchn[virq] != 0 )
         ERROR_EXIT(-EEXIST);
 
-    if ( (port = get_free_port(d)) < 0 )
+    if ( port >= 0 )
+    {
+        if ( (rc = evtchn_allocate_port(d, port)) < 0 )
+            ERROR_EXIT(rc);
+    }
+    else if ( (port = get_free_port(d)) < 0 )
         ERROR_EXIT(port);
 
     chn = evtchn_from_port(d, port);
@@ -511,7 +536,7 @@ static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
 }
 
 
-static long evtchn_close(struct domain *d1, int port1, bool_t guest)
+long evtchn_close(struct domain *d1, int port1, bool guest)
 {
     struct domain *d2 = NULL;
     struct vcpu   *v;
@@ -619,7 +644,7 @@ static long evtchn_close(struct domain *d1, int port1, bool_t guest)
 
         double_evtchn_lock(chn1, chn2);
 
-        free_evtchn(d1, chn1);
+        evtchn_free(d1, chn1);
 
         chn2->state = ECS_UNBOUND;
         chn2->u.unbound.remote_domid = d1->domain_id;
@@ -633,7 +658,7 @@ static long evtchn_close(struct domain *d1, int port1, bool_t guest)
     }
 
     spin_lock(&chn1->lock);
-    free_evtchn(d1, chn1);
+    evtchn_free(d1, chn1);
     spin_unlock(&chn1->lock);
 
  out:
@@ -839,7 +864,7 @@ static void clear_global_virq_handlers(struct domain *d)
     }
 }
 
-static long evtchn_status(evtchn_status_t *status)
+long evtchn_status(evtchn_status_t *status)
 {
     struct domain   *d;
     domid_t          dom = status->dom;
@@ -1030,6 +1055,11 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
     long rc;
 
+#ifdef CONFIG_X86
+    if ( pv_shim )
+        return pv_shim_event_channel_op(cmd, arg);
+#endif
+
     switch ( cmd )
     {
     case EVTCHNOP_alloc_unbound: {
@@ -1056,7 +1086,7 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         struct evtchn_bind_virq bind_virq;
         if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
             return -EFAULT;
-        rc = evtchn_bind_virq(&bind_virq);
+        rc = evtchn_bind_virq(&bind_virq, -1);
         if ( !rc && __copy_to_guest(arg, &bind_virq, 1) )
             rc = -EFAULT; /* Cleaning up here would be a mess! */
         break;
diff --git a/xen/drivers/char/xen_pv_console.c b/xen/drivers/char/xen_pv_console.c
index 6aa694e395..fb5a7893be 100644
--- a/xen/drivers/char/xen_pv_console.c
+++ b/xen/drivers/char/xen_pv_console.c
@@ -92,6 +92,11 @@ static void notify_daemon(void)
     xen_hypercall_evtchn_send(cons_evtchn);
 }
 
+evtchn_port_t pv_console_evtchn(void)
+{
+    return cons_evtchn;
+}
+
 size_t pv_console_rx(struct cpu_user_regs *regs)
 {
     char c;
@@ -101,10 +106,6 @@ size_t pv_console_rx(struct cpu_user_regs *regs)
     if ( !cons_ring )
         return 0;
 
-    /* TODO: move this somewhere */
-    if ( !test_bit(cons_evtchn, XEN_shared_info->evtchn_pending) )
-        return 0;
-
     prod = ACCESS_ONCE(cons_ring->in_prod);
     cons = cons_ring->in_cons;
     /* Get pointers before reading the ring */
@@ -125,8 +126,6 @@ size_t pv_console_rx(struct cpu_user_regs *regs)
     ACCESS_ONCE(cons_ring->in_cons) = cons;
     notify_daemon();
 
-    clear_bit(cons_evtchn, XEN_shared_info->evtchn_pending);
-
     return recv;
 }
 
diff --git a/xen/include/asm-x86/pv/shim.h b/xen/include/asm-x86/pv/shim.h
index 8d4e8d2ae1..6f7b39c3e0 100644
--- a/xen/include/asm-x86/pv/shim.h
+++ b/xen/include/asm-x86/pv/shim.h
@@ -36,6 +36,8 @@ void pv_shim_setup_dom(struct domain *d, l4_pgentry_t *l4start,
                        unsigned long console_va, unsigned long vphysmap,
                        start_info_t *si);
 void pv_shim_shutdown(uint8_t reason);
+long pv_shim_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg);
+void pv_shim_inject_evtchn(unsigned int port);
 domid_t get_dom0_domid(void);
 
 #else
@@ -53,6 +55,16 @@ static inline void pv_shim_shutdown(uint8_t reason)
 {
     ASSERT_UNREACHABLE();
 }
+static inline long pv_shim_event_channel_op(int cmd,
+                                            XEN_GUEST_HANDLE_PARAM(void) arg)
+{
+    ASSERT_UNREACHABLE();
+    return 0;
+}
+static inline void pv_shim_inject_evtchn(unsigned int port)
+{
+    ASSERT_UNREACHABLE();
+}
 static inline domid_t get_dom0_domid(void)
 {
     return 0;
diff --git a/xen/include/xen/event.h b/xen/include/xen/event.h
index 87915ead69..3d202d8172 100644
--- a/xen/include/xen/event.h
+++ b/xen/include/xen/event.h
@@ -48,6 +48,21 @@ int evtchn_send(struct domain *d, unsigned int lport);
 /* Bind a local event-channel port to the specified VCPU. */
 long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id);
 
+/* Bind a VIRQ. */
+int evtchn_bind_virq(evtchn_bind_virq_t *bind, int port);
+
+/* Get the status of an event channel port. */
+long evtchn_status(evtchn_status_t *status);
+
+/* Close an event channel. */
+long evtchn_close(struct domain *d1, int port1, bool guest);
+
+/* Free an event channel. */
+void evtchn_free(struct domain *d, struct evtchn *chn);
+
+/* Allocate a specific event channel port. */
+int evtchn_allocate_port(struct domain *d, unsigned int port);
+
 /* Unmask a local event-channel port. */
 int evtchn_unmask(unsigned int port);
 
diff --git a/xen/include/xen/pv_console.h b/xen/include/xen/pv_console.h
index e578b56620..cb92539666 100644
--- a/xen/include/xen/pv_console.h
+++ b/xen/include/xen/pv_console.h
@@ -10,6 +10,7 @@ void pv_console_set_rx_handler(serial_rx_fn fn);
 void pv_console_init_postirq(void);
 void pv_console_puts(const char *buf);
 size_t pv_console_rx(struct cpu_user_regs *regs);
+evtchn_port_t pv_console_evtchn(void);
 
 #else
 
@@ -18,6 +19,11 @@ static inline void pv_console_set_rx_handler(serial_rx_fn fn) { }
 static inline void pv_console_init_postirq(void) { }
 static inline void pv_console_puts(const char *buf) { }
 static inline size_t pv_console_rx(struct cpu_user_regs *regs) { return 0; }
+/*
+ * Stub for !CONFIG_XEN_GUEST builds: must be static inline like the other
+ * stubs above, or every translation unit including this header emits an
+ * external definition and the link fails with multiple-definition errors.
+ */
+static inline evtchn_port_t pv_console_evtchn(void)
+{
+    ASSERT_UNREACHABLE();
+    return 0;
+}
 
 #endif /* !CONFIG_XEN_GUEST */
 #endif /* __XEN_PV_CONSOLE_H__ */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 64abc1df6c..ac65d0c0df 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -48,6 +48,8 @@ DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_compat_t);
 /* A global pointer to the hardware domain (usually DOM0). */
 extern struct domain *hardware_domain;
 
+extern struct domain *pv_domain;
+
 #ifdef CONFIG_LATE_HWDOM
 extern domid_t hardware_domid;
 #else
-- 
2.11.0


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

  parent reply	other threads:[~2018-01-04 13:29 UTC|newest]

Thread overview: 206+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-01-04 13:05 [PATCH RFC v1 00/74] Run PV guest in PVH container Wei Liu
2018-01-04 13:05 ` [PATCH RFC v1 01/74] x86/svm: Offer CPUID Faulting to AMD HVM guests as well Wei Liu
2018-01-04 14:00   ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 02/74] x86: Common cpuid faulting support Wei Liu
2018-01-04 14:19   ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 03/74] x86/upcall: inject a spurious event after setting upcall vector Wei Liu
2018-01-04 13:05 ` [PATCH RFC v1 04/74] tools/libxc: initialise hvm loader elf log fd to get more logging Wei Liu
2018-01-04 13:05 ` [PATCH RFC v1 05/74] tools/libxc: remove extraneous newline in xc_dom_load_acpi Wei Liu
2018-01-04 13:05 ` [PATCH RFC v1 06/74] tools/libelf: fix elf notes check for PVH guest Wei Liu
2018-01-04 14:37   ` Jan Beulich
2018-01-08 15:34     ` Wei Liu
2018-01-08 16:02       ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 07/74] tools/libxc: Multi modules support Wei Liu
2018-01-04 13:05 ` [PATCH RFC v1 08/74] libxl: Introduce hack to allow PVH mode to add a shim Wei Liu
2018-01-04 13:05 ` [PATCH RFC v1 09/74] xen/common: Widen the guest logging buffer slightly Wei Liu
2018-01-04 13:05 ` [PATCH RFC v1 10/74] x86/time: Print a more helpful error when a platform timer can't be found Wei Liu
2018-01-05 10:37   ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 11/74] x86/link: Introduce and use SECTION_ALIGN Wei Liu
2018-01-05 10:38   ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 12/74] xen/acpi: mark the PM timer FADT field as optional Wei Liu
2018-01-05 10:52   ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 13/74] xen/domctl: Return arch_config via getdomaininfo Wei Liu
2018-01-05 10:58   ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 14/74] tools/ocaml: Expose arch_config in domaininfo Wei Liu
2018-01-04 13:05 ` [PATCH RFC v1 15/74] tools/ocaml: Extend domain_create() to take arch_domainconfig Wei Liu
2018-01-04 13:05 ` [PATCH RFC v1 16/74] x86/fixmap: Modify fix_to_virt() to return a void pointer Wei Liu
2018-01-05 11:05   ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 17/74] ---- x86/Kconfig: Options for Xen and PVH support Wei Liu
2018-01-05 11:11   ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 18/74] x86/link: Relocate program headers Wei Liu
2018-01-05 11:20   ` Jan Beulich
2018-01-08 15:43     ` Wei Liu
2018-01-08 16:26       ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 19/74] x86: introduce ELFNOTE macro Wei Liu
2018-01-05 11:27   ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 20/74] x86: produce a binary that can be booted as PVH Wei Liu
2018-01-05 11:39   ` Jan Beulich
2018-01-08 15:59     ` Wei Liu
2018-01-08 16:42       ` Jan Beulich
2018-01-09 13:49         ` Wei Liu
2018-01-10 19:10     ` Wei Liu
2018-01-04 13:05 ` [PATCH RFC v1 21/74] x86/entry: Early PVH boot code Wei Liu
2018-01-05 13:32   ` Jan Beulich
2018-01-09 15:45     ` Wei Liu
2018-01-09 16:41       ` Jan Beulich
2018-01-09 17:10         ` Wei Liu
2018-01-04 13:05 ` [PATCH RFC v1 22/74] x86/boot: Map more than the first 16MB Wei Liu
2018-01-04 13:05 ` [PATCH RFC v1 23/74] x86/entry: Probe for Xen early during boot Wei Liu
2018-01-05 13:40   ` Jan Beulich
2018-01-10 17:45     ` Wei Liu
2018-01-11  7:55       ` Jan Beulich
2018-01-11  9:43         ` Wei Liu
2018-01-04 13:05 ` [PATCH RFC v1 24/74] x86/guest: Hypercall support Wei Liu
2018-01-05 13:53   ` Jan Beulich
2018-01-05 14:09     ` Andrew Cooper
2018-01-04 13:05 ` [PATCH RFC v1 25/74] x86/shutdown: Support for using SCHEDOP_{shutdown, reboot} Wei Liu
2018-01-05 14:01   ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 26/74] x86/pvh: Retrieve memory map from Xen Wei Liu
2018-01-05 14:05   ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 27/74] xen/console: Introduce console=xen Wei Liu
2018-01-05 14:08   ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 28/74] x86: initialise shared_info page Wei Liu
2018-01-05 14:11   ` Jan Beulich
2018-01-05 14:20     ` Andrew Cooper
2018-01-05 14:28       ` Roger Pau Monné
2018-01-05 14:40         ` Andrew Cooper
2018-01-04 13:05 ` [PATCH RFC v1 29/74] x86: xen pv clock time source Wei Liu
2018-01-05 14:17   ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 30/74] x86: APIC timer calibration when running as a guest Wei Liu
2018-01-05 14:35   ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 31/74] x86: read wallclock from Xen running in pvh mode Wei Liu
2018-01-05 14:43   ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 32/74] x86: don't swallow the first command line item " Wei Liu
2018-01-05 14:49   ` Jan Beulich
2018-01-09 14:30   ` Roger Pau Monné
2018-01-04 13:05 ` [PATCH RFC v1 33/74] x86/guest: enable event channels upcalls Wei Liu
2018-01-05 15:07   ` Jan Beulich
2018-01-05 15:19     ` Andrew Cooper
2018-01-04 13:05 ` [PATCH RFC v1 34/74] x86/guest: add PV console code Wei Liu
2018-01-05 15:22   ` Jan Beulich
2018-01-10 15:33     ` Roger Pau Monné
2018-01-10 15:55       ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 35/74] x86/guest: use PV console for Xen/Dom0 I/O Wei Liu
2018-01-04 13:05 ` [PATCH RFC v1 36/74] --- x86/shim: Kconfig and command line options Wei Liu
2018-01-05 15:26   ` Jan Beulich
2018-01-05 17:51     ` Andrew Cooper
2018-01-08  8:22       ` Jan Beulich
2018-01-08 11:33         ` Andrew Cooper
2018-01-08 11:46           ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 37/74] tools/firmware: Build and install xen-shim Wei Liu
2018-01-04 13:05 ` [PATCH RFC v1 38/74] x86/pv-shim: Force CPUID faulting in pv-shim mode Wei Liu
2018-01-08 10:16   ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 39/74] xen/x86: make VGA support selectable Wei Liu
2018-01-08 10:22   ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 40/74] xen/x86: report domain id on cpuid Wei Liu
2018-01-08 10:27   ` Jan Beulich
2018-01-08 10:34     ` Andrew Cooper
2018-01-08 11:11       ` Jan Beulich
2018-01-08 11:22         ` Andrew Cooper
2018-01-08 11:27           ` Jan Beulich
2018-01-08 11:29   ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 41/74] xen/pvh: do not mark the low 1MB as IO mem Wei Liu
2018-01-08 10:30   ` Jan Beulich
2018-01-08 10:37     ` Roger Pau Monné
2018-01-08 11:11       ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 42/74] sched/null: skip vCPUs on the waitqueue that are blocked Wei Liu
2018-01-08 10:37   ` Jan Beulich
2018-01-08 11:12     ` George Dunlap
2018-01-12  9:54       ` Dario Faggioli
2018-01-12 10:45         ` Roger Pau Monné
2018-01-12 11:16           ` Dario Faggioli
2018-01-12 11:22             ` Roger Pau Monné
2018-01-12 10:41   ` Dario Faggioli
2018-01-04 13:05 ` [PATCH RFC v1 43/74] xen: introduce rangeset_reserve_hole Wei Liu
2018-01-08 10:46   ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 44/74] xen/pvshim: keep track of unused pages Wei Liu
2018-01-08 10:58   ` Jan Beulich
2018-01-08 11:04     ` Roger Pau Monné
2018-01-08 11:22       ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 45/74] x86/guest: use unpopulated memory to map the shared_info page Wei Liu
2018-01-08 11:03   ` Jan Beulich
2018-01-08 11:06     ` Roger Pau Monné
2018-01-08 11:25       ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 46/74] xen/guest: fetch vCPU ID from Xen Wei Liu
2018-01-08 11:04   ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 47/74] x86/guest: fix upcall vector setup Wei Liu
2018-01-08 11:08   ` Jan Beulich
2018-01-04 13:05 ` [PATCH RFC v1 48/74] x86/guest: unmask console event channel Wei Liu
2018-01-04 13:06 ` [PATCH RFC v1 49/74] x86/guest: map per-cpu vcpu_info area Wei Liu
2018-01-08 13:21   ` Jan Beulich
2018-01-09 12:08     ` Roger Pau Monné
2018-01-04 13:06 ` [PATCH RFC v1 50/74] xen/pvshim: remove Dom0 kernel support check Wei Liu
2018-01-08 13:28   ` Jan Beulich
2018-01-04 13:06 ` [PATCH RFC v1 51/74] xen/pvshim: don't allow access to iomem or ioports Wei Liu
2018-01-08 13:29   ` Jan Beulich
2018-01-04 13:06 ` [PATCH RFC v1 52/74] xen: mark xenstore/console pages as RAM and add them to dom_io Wei Liu
2018-01-08 13:49   ` Jan Beulich
2018-01-09  9:25     ` Roger Pau Monné
2018-01-09 11:03       ` Jan Beulich
2018-01-09 11:26         ` Roger Pau Monné
2018-01-09 13:34           ` Jan Beulich
2018-01-04 13:06 ` [PATCH RFC v1 53/74] xen/pvshim: modify Dom0 builder in order to build a DomU Wei Liu
2018-01-08 14:06   ` Jan Beulich
2018-01-09 16:09     ` Roger Pau Monné
2018-01-09 16:26       ` Jan Beulich
2018-01-09  9:06   ` Jan Beulich
2018-01-04 13:06 ` [PATCH RFC v1 54/74] xen/pvshim: set correct domid value Wei Liu
2018-01-08 14:17   ` Jan Beulich
2018-01-09 16:27     ` Roger Pau Monné
2018-01-04 13:06 ` Wei Liu [this message]
2018-01-08 16:05   ` [PATCH RFC v1 55/74] xen/pvshim: forward evtchn ops between L0 Xen and L2 DomU Jan Beulich
2018-01-08 16:22     ` Roger Pau Monné
2018-01-09  8:00       ` Jan Beulich
2018-01-09 16:45         ` Roger Pau Monné
2018-01-09 17:42           ` Jan Beulich
2018-01-09 17:50     ` Anthony Liguori
2018-01-10 12:23       ` Roger Pau Monné
2018-01-09  7:49   ` Jan Beulich
2018-01-04 13:06 ` [PATCH RFC v1 56/74] xen/pvshim: add grant table operations Wei Liu
2018-01-08 17:19   ` Jan Beulich
2018-01-09 18:34     ` Roger Pau Monné
2018-01-10  7:28       ` Jan Beulich
2018-01-10  8:01         ` Roger Pau Monné
2018-01-04 13:06 ` [PATCH RFC v1 57/74] x86/pv-shim: shadow PV console's page for L2 DomU Wei Liu
2018-01-09  9:13   ` Jan Beulich
2018-01-09 15:43     ` Sergey Dyasli
2018-01-09 16:28       ` Jan Beulich
2018-01-10 16:56         ` Sergey Dyasli
2018-01-12  7:03           ` Sarah Newman
2018-01-04 13:06 ` [PATCH RFC v1 58/74] xen/pvshim: add migration support Wei Liu
2018-01-09  9:38   ` Jan Beulich
2018-01-10 12:54     ` Roger Pau Monné
2018-01-04 13:06 ` [PATCH RFC v1 59/74] xen/pvshim: add shim_mem cmdline parameter Wei Liu
2018-01-09  9:47   ` Jan Beulich
2018-01-04 13:06 ` [PATCH RFC v1 60/74] xen/pvshim: set max_pages to the value of tot_pages Wei Liu
2018-01-09  9:48   ` Jan Beulich
2018-01-04 13:06 ` [PATCH RFC v1 61/74] xen/pvshim: support vCPU hotplug Wei Liu
2018-01-09 10:16   ` Jan Beulich
2018-01-10 13:07     ` Roger Pau Monné
2018-01-10 13:33       ` Jan Beulich
2018-01-10 14:40     ` Roger Pau Monné
2018-01-04 13:06 ` [PATCH RFC v1 62/74] xen/pvshim: memory hotplug Wei Liu
2018-01-09 10:42   ` Jan Beulich
2018-01-10 13:36     ` Roger Pau Monné
2018-01-10 13:42       ` Jan Beulich
2018-01-04 13:06 ` [PATCH RFC v1 63/74] xen/shim: modify shim_mem parameter behaviour Wei Liu
2018-01-09 10:48   ` Jan Beulich
2018-01-04 13:06 ` [PATCH RFC v1 64/74] xen/pvshim: use default position for the m2p mappings Wei Liu
2018-01-09 10:50   ` Jan Beulich
2018-01-04 13:06 ` [PATCH RFC v1 65/74] xen/shim: crash instead of reboot in shim mode Wei Liu
2018-01-09 10:52   ` Jan Beulich
2018-01-04 13:06 ` [PATCH RFC v1 66/74] xen/shim: allow DomU to have as many vcpus as available Wei Liu
2018-01-09 10:59   ` Jan Beulich
2018-01-10 16:14     ` Roger Pau Monné
2018-01-04 13:06 ` [PATCH RFC v1 67/74] libxl: libxl__build_hvm: Introduce separate b_info parameter Wei Liu
2018-01-04 13:06 ` [PATCH RFC v1 68/74] libxl__domain_build_info_setdefault_pvhhvm: introduce Wei Liu
2018-01-04 13:06 ` [PATCH RFC v1 69/74] libxl_bitmap_copy_alloc: copy 0, NULL as 0, NULL Wei Liu
2018-01-04 13:06 ` [PATCH RFC v1 70/74] libxl: pvshim: Check state->shim_path before domain type Wei Liu
2018-01-04 13:06 ` [PATCH RFC v1 71/74] libxl: pvshim: Provide first-class config settings to enable shim mode Wei Liu
2018-01-04 13:06 ` [PATCH RFC v1 72/74] libxl: pvshim: Introduce pvhshim_extra Wei Liu
2018-01-04 13:06 ` [PATCH RFC v1 73/74] xl: pvshim: Provide and document xl config Wei Liu
2018-01-04 13:06 ` [PATCH RFC v1 74/74] libxl: pvshim: Set video_memkb to ~0 Wei Liu
2018-01-08 16:12 ` [PATCH RFC v1 00/74] Run PV guest in PVH container Ian Jackson
2018-01-11 15:39   ` Ian Jackson
2018-01-10 16:26 ` George Dunlap
2018-01-10 16:28   ` Wei Liu

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20180104130625.28605-56-wei.liu2@citrix.com \
    --to=wei.liu2@citrix.com \
    --cc=xen-devel@lists.xenproject.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).