From: Wei Liu <wei.liu2@citrix.com>
To: Xen-devel <xen-devel@lists.xenproject.org>
Cc: wei.liu2@citrix.com
Subject: [PATCH v2 47/62] xen/pvshim: forward evtchn ops between L0 Xen and L2 DomU
Date: Fri, 12 Jan 2018 11:28:55 +0000
Message-ID: <20180112112910.16762-48-wei.liu2@citrix.com>
In-Reply-To: <20180112112910.16762-1-wei.liu2@citrix.com>
From: Roger Pau Monne <roger.pau@citrix.com>

Most event channel operations issued by the L2 DomU are forwarded to
L0 Xen, with the shim reserving the returned ports so that it can
route upcalls back to the guest. Note that the unmask and the virq
operations are handled by the shim itself, and that FIFO event
channels are not exposed to the guest.
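
In short: VIRQ (and unmask) handling stays in L1, everything else is
relayed to L0, and anything the shim cannot represent is rejected. A
condensed, illustrative sketch of that dispatch policy follows; the
real switch is pv_shim_event_channel_op() in the diff below, and
handle_locally(), forward_to_l0() and evtchn_handled_locally() are
hypothetical stand-ins for the per-operation bodies:

    /* Illustrative sketch only -- not part of the patch. */
    static long shim_evtchn_dispatch(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
    {
        switch ( cmd )
        {
        case EVTCHNOP_unmask:           /* always serviced in the shim (L1) */
        case EVTCHNOP_bind_virq:        /* bound in L1 on an L0-allocated port */
            return handle_locally(cmd, arg);

        case EVTCHNOP_alloc_unbound:    /* forwarded to L0; the resulting */
        case EVTCHNOP_bind_interdomain: /* port is then reserved in L1    */
        case EVTCHNOP_bind_ipi:
        case EVTCHNOP_send:
        case EVTCHNOP_reset:
            return forward_to_l0(cmd, arg);

        case EVTCHNOP_status:           /* local iff the port is shim-managed */
        case EVTCHNOP_bind_vcpu:        /* (i.e. a VIRQ), forwarded otherwise */
        case EVTCHNOP_close:
            return evtchn_handled_locally(arg) ? handle_locally(cmd, arg)
                                               : forward_to_l0(cmd, arg);

        default:                        /* no FIFO or PIRQ support for now */
            return -EOPNOTSUPP;
        }
    }
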
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Signed-off-by: Anthony Liguori <aliguori@amazon.com>
Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
---
Changes since v1:
- Use find_first_set_bit instead of ffsl.
- Indent macro one more level.
- Have a single evtchn_close struct in pv_shim_event_channel_op.
- Add blank lines between switch cases.
- Use -EOPNOTSUPP in order to signal lack of FIFO or PIRQ support.
- Switch the evtchn_bind_virq parameter to evtchn_port_t and use 0 to
  signal that allocation is needed (see the snippet after this list).
- Switch evtchn helpers return type to int instead of long.
- Re-write event channel hypercall table handler instead of adding
hooks.
- Remove the pv_domain variable and instead use a static variable in
shim code.
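
As an aside on the evtchn_bind_virq item above, the new second
parameter selects the allocation behaviour; both call forms appear
verbatim in the diff below:

    /* 0: allocate a free port, as before (do_event_channel_op()). */
    rc = evtchn_bind_virq(&bind_virq, 0);

    /* Non-zero: force the VIRQ onto the port allocated by L0 (shim). */
    rc = evtchn_bind_virq(&virq, alloc.port);
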
---
xen/arch/x86/compat.c | 4 +-
xen/arch/x86/guest/xen.c | 25 +++-
xen/arch/x86/pv/hypercall.c | 17 +++
xen/arch/x86/pv/shim.c | 263 ++++++++++++++++++++++++++++++++++++++
xen/common/event_channel.c | 99 ++++++++------
xen/drivers/char/xen_pv_console.c | 11 +-
xen/include/asm-x86/hypercall.h | 3 +
xen/include/asm-x86/pv/shim.h | 5 +
xen/include/xen/event.h | 15 +++
xen/include/xen/pv_console.h | 6 +
10 files changed, 402 insertions(+), 46 deletions(-)
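
For orientation before reading the diff: the reworked
xen_evtchn_upcall() walks the 2-level pending bitmap, using the
per-vCPU evtchn_pending_sel word to select words of shared_info's
evtchn_pending[] array and deriving the global port number from the
word index and bit position. Below is a standalone sketch of that
walk, assuming the usual Xen helpers (find_first_set_bit(), xchg(),
__clear_bit()); scan_pending() and handle() are illustrative names:

    /* Sketch of the 2-level scan performed by xen_evtchn_upcall(). */
    static void scan_pending(unsigned long sel, unsigned long *pending,
                             const unsigned long *mask,
                             void (*handle)(unsigned int port))
    {
        while ( sel )
        {
            /* Level 1: which word of the pending array has bits set. */
            unsigned int l1 = find_first_set_bit(sel);
            /* Level 2: snapshot and clear that word, drop masked ports. */
            unsigned long evtchn = xchg(&pending[l1], 0) & ~mask[l1];

            __clear_bit(l1, &sel);
            while ( evtchn )
            {
                unsigned int bit = find_first_set_bit(evtchn);

                __clear_bit(bit, &evtchn);
                handle(l1 * BITS_PER_LONG + bit);   /* global port number */
            }
        }
    }
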
diff --git a/xen/arch/x86/compat.c b/xen/arch/x86/compat.c
index f417cd5034..9d376a4589 100644
--- a/xen/arch/x86/compat.c
+++ b/xen/arch/x86/compat.c
@@ -69,8 +69,8 @@ long do_event_channel_op_compat(XEN_GUEST_HANDLE_PARAM(evtchn_op_t) uop)
case EVTCHNOP_bind_ipi:
case EVTCHNOP_bind_vcpu:
case EVTCHNOP_unmask:
- return do_event_channel_op(op.cmd,
- guest_handle_from_ptr(&uop.p->u, void));
+ return pv_get_hypercall_handler(__HYPERVISOR_event_channel_op, false)
+ (op.cmd, (unsigned long)&uop.p->u, 0, 0, 0, 0);
default:
return -ENOSYS;
diff --git a/xen/arch/x86/guest/xen.c b/xen/arch/x86/guest/xen.c
index aff16a0e35..57b297ad47 100644
--- a/xen/arch/x86/guest/xen.c
+++ b/xen/arch/x86/guest/xen.c
@@ -18,6 +18,7 @@
*
* Copyright (c) 2017 Citrix Systems Ltd.
*/
+#include <xen/event.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/pfn.h>
@@ -193,11 +194,31 @@ static void __init init_memmap(void)
static void xen_evtchn_upcall(struct cpu_user_regs *regs)
{
struct vcpu_info *vcpu_info = this_cpu(vcpu_info);
+ unsigned long pending;
vcpu_info->evtchn_upcall_pending = 0;
- write_atomic(&vcpu_info->evtchn_pending_sel, 0);
+ pending = xchg(&vcpu_info->evtchn_pending_sel, 0);
- pv_console_rx(regs);
+ while ( pending )
+ {
+ unsigned int l1 = find_first_set_bit(pending);
+ unsigned long evtchn = xchg(&XEN_shared_info->evtchn_pending[l1], 0);
+
+ __clear_bit(l1, &pending);
+ evtchn &= ~XEN_shared_info->evtchn_mask[l1];
+ while ( evtchn )
+ {
+ unsigned int port = find_first_set_bit(evtchn);
+
+ __clear_bit(port, &evtchn);
+ port += l1 * BITS_PER_LONG;
+
+ if ( pv_console && port == pv_console_evtchn() )
+ pv_console_rx(regs);
+ else if ( pv_shim )
+ pv_shim_inject_evtchn(port);
+ }
+ }
ack_APIC_irq();
}
diff --git a/xen/arch/x86/pv/hypercall.c b/xen/arch/x86/pv/hypercall.c
index f79f7eef62..3b72d6a44d 100644
--- a/xen/arch/x86/pv/hypercall.c
+++ b/xen/arch/x86/pv/hypercall.c
@@ -320,6 +320,23 @@ void hypercall_page_initialise_ring1_kernel(void *hypercall_page)
*(u16 *)(p+ 6) = (HYPERCALL_VECTOR << 8) | 0xcd; /* int $xx */
}
+void __init pv_hypercall_table_replace(unsigned int hypercall,
+ hypercall_fn_t * native,
+ hypercall_fn_t *compat)
+{
+#define HANDLER_POINTER(f) \
+ ((unsigned long *)__va(__pa(&pv_hypercall_table[hypercall].f)))
+ write_atomic(HANDLER_POINTER(native), (unsigned long)native);
+ write_atomic(HANDLER_POINTER(compat), (unsigned long)compat);
+#undef HANDLER_POINTER
+}
+
+hypercall_fn_t *pv_get_hypercall_handler(unsigned int hypercall, bool compat)
+{
+ return compat ? pv_hypercall_table[hypercall].compat
+ : pv_hypercall_table[hypercall].native;
+}
+
/*
* Local variables:
* mode: C
diff --git a/xen/arch/x86/pv/shim.c b/xen/arch/x86/pv/shim.c
index 78351c9ee0..36f3a366d3 100644
--- a/xen/arch/x86/pv/shim.c
+++ b/xen/arch/x86/pv/shim.c
@@ -18,6 +18,8 @@
*
* Copyright (c) 2017 Citrix Systems Ltd.
*/
+#include <xen/event.h>
+#include <xen/guest_access.h>
#include <xen/hypercall.h>
#include <xen/init.h>
#include <xen/shutdown.h>
@@ -35,6 +37,10 @@ bool pv_shim;
boolean_param("pv-shim", pv_shim);
#endif
+static struct domain *guest;
+
+static long pv_shim_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg);
+
#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER| \
_PAGE_GUEST_KERNEL)
#define COMPAT_L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
@@ -63,6 +69,27 @@ static void __init replace_va_mapping(struct domain *d, l4_pgentry_t *l4start,
: COMPAT_L1_PROT));
}
+static void evtchn_reserve(struct domain *d, unsigned int port)
+{
+ ASSERT(port_is_valid(d, port));
+ evtchn_from_port(d, port)->state = ECS_RESERVED;
+ BUG_ON(xen_hypercall_evtchn_unmask(port));
+}
+
+static bool evtchn_handled(struct domain *d, unsigned int port)
+{
+ ASSERT(port_is_valid(d, port));
+ /* The shim manages VIRQs, the rest is forwarded to L0. */
+ return evtchn_from_port(d, port)->state == ECS_VIRQ;
+}
+
+static void evtchn_assign_vcpu(struct domain *d, unsigned int port,
+ unsigned int vcpu)
+{
+ ASSERT(port_is_valid(d, port));
+ evtchn_from_port(d, port)->notify_vcpu_id = vcpu;
+}
+
void __init pv_shim_setup_dom(struct domain *d, l4_pgentry_t *l4start,
unsigned long va_start, unsigned long store_va,
unsigned long console_va, unsigned long vphysmap,
@@ -82,6 +109,11 @@ void __init pv_shim_setup_dom(struct domain *d, l4_pgentry_t *l4start,
replace_va_mapping(d, l4start, va, param); \
dom0_update_physmap(d, PFN_DOWN((va) - va_start), param, vphysmap); \
} \
+ else \
+ { \
+ BUG_ON(evtchn_allocate_port(d, param)); \
+ evtchn_reserve(d, param); \
+ } \
})
SET_AND_MAP_PARAM(HVM_PARAM_STORE_PFN, si->store_mfn, store_va);
SET_AND_MAP_PARAM(HVM_PARAM_STORE_EVTCHN, si->store_evtchn, 0);
@@ -92,6 +124,10 @@ void __init pv_shim_setup_dom(struct domain *d, l4_pgentry_t *l4start,
SET_AND_MAP_PARAM(HVM_PARAM_CONSOLE_EVTCHN, si->console.domU.evtchn, 0);
}
#undef SET_AND_MAP_PARAM
+ pv_hypercall_table_replace(__HYPERVISOR_event_channel_op,
+ (hypercall_fn_t *)pv_shim_event_channel_op,
+ (hypercall_fn_t *)pv_shim_event_channel_op);
+ guest = d;
}
void pv_shim_shutdown(uint8_t reason)
@@ -100,6 +136,233 @@ void pv_shim_shutdown(uint8_t reason)
xen_hypercall_shutdown(reason);
}
+static long pv_shim_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
+{
+ struct domain *d = current->domain;
+ struct evtchn_close close;
+ long rc;
+
+ switch ( cmd )
+ {
+#define EVTCHN_FORWARD(cmd, port_field) \
+ case EVTCHNOP_##cmd: { \
+ struct evtchn_##cmd op; \
+ \
+ if ( copy_from_guest(&op, arg, 1) != 0 ) \
+ return -EFAULT; \
+ \
+ rc = xen_hypercall_event_channel_op(EVTCHNOP_##cmd, &op); \
+ if ( rc ) \
+ break; \
+ \
+ spin_lock(&d->event_lock); \
+ rc = evtchn_allocate_port(d, op.port_field); \
+ if ( rc ) \
+ { \
+ close.port = op.port_field; \
+ BUG_ON(xen_hypercall_event_channel_op(EVTCHNOP_close, &close)); \
+ } \
+ else \
+ evtchn_reserve(d, op.port_field); \
+ spin_unlock(&d->event_lock); \
+ \
+ if ( !rc && __copy_to_guest(arg, &op, 1) ) \
+ rc = -EFAULT; \
+ \
+ break; \
+ }
+
+ EVTCHN_FORWARD(alloc_unbound, port)
+ EVTCHN_FORWARD(bind_interdomain, local_port)
+#undef EVTCHN_FORWARD
+
+ case EVTCHNOP_bind_virq: {
+ struct evtchn_bind_virq virq;
+ struct evtchn_alloc_unbound alloc = {
+ .dom = DOMID_SELF,
+ .remote_dom = DOMID_SELF,
+ };
+
+ if ( copy_from_guest(&virq, arg, 1) != 0 )
+ return -EFAULT;
+ /*
+ * The event channel space is actually controlled by L0 Xen, so
+ * allocate a port from L0 and then force the VIRQ to be bound to that
+ * specific port.
+ *
+ * This is only required for VIRQ because the rest of the event channel
+ * operations are handled directly by L0.
+ */
+ rc = xen_hypercall_event_channel_op(EVTCHNOP_alloc_unbound, &alloc);
+ if ( rc )
+ break;
+
+ /* Force L1 to use the event channel port allocated on L0. */
+ rc = evtchn_bind_virq(&virq, alloc.port);
+ if ( rc )
+ {
+ close.port = alloc.port;
+ BUG_ON(xen_hypercall_event_channel_op(EVTCHNOP_close, &close));
+ }
+
+ if ( !rc && __copy_to_guest(arg, &virq, 1) )
+ rc = -EFAULT;
+
+ break;
+ }
+
+ case EVTCHNOP_status: {
+ struct evtchn_status status;
+
+ if ( copy_from_guest(&status, arg, 1) != 0 )
+ return -EFAULT;
+
+ /*
+ * NB: if the event channel is not handled by the shim, just forward
+ * the status request to L0, even if the port is not valid.
+ */
+ if ( port_is_valid(d, status.port) && evtchn_handled(d, status.port) )
+ rc = evtchn_status(&status);
+ else
+ rc = xen_hypercall_event_channel_op(EVTCHNOP_status, &status);
+
+ break;
+ }
+
+ case EVTCHNOP_bind_vcpu: {
+ struct evtchn_bind_vcpu vcpu;
+
+ if ( copy_from_guest(&vcpu, arg, 1) != 0 )
+ return -EFAULT;
+
+ if ( !port_is_valid(d, vcpu.port) )
+ return -EINVAL;
+
+ if ( evtchn_handled(d, vcpu.port) )
+ rc = evtchn_bind_vcpu(vcpu.port, vcpu.vcpu);
+ else
+ {
+ rc = xen_hypercall_event_channel_op(EVTCHNOP_bind_vcpu, &vcpu);
+ if ( !rc )
+ evtchn_assign_vcpu(d, vcpu.port, vcpu.vcpu);
+ }
+
+ break;
+ }
+
+ case EVTCHNOP_close: {
+ if ( copy_from_guest(&close, arg, 1) != 0 )
+ return -EFAULT;
+
+ if ( !port_is_valid(d, close.port) )
+ return -EINVAL;
+
+ set_bit(close.port, XEN_shared_info->evtchn_mask);
+
+ if ( evtchn_handled(d, close.port) )
+ {
+ rc = evtchn_close(d, close.port, true);
+ if ( rc )
+ break;
+ }
+ else
+ evtchn_free(d, evtchn_from_port(d, close.port));
+
+ rc = xen_hypercall_event_channel_op(EVTCHNOP_close, &close);
+ if ( rc )
+ /*
+ * If the port cannot be closed on the L0, mark it as reserved
+ * in the shim to avoid re-using it.
+ */
+ evtchn_reserve(d, close.port);
+
+ break;
+ }
+
+ case EVTCHNOP_bind_ipi: {
+ struct evtchn_bind_ipi ipi;
+
+ if ( copy_from_guest(&ipi, arg, 1) != 0 )
+ return -EFAULT;
+
+ rc = xen_hypercall_event_channel_op(EVTCHNOP_bind_ipi, &ipi);
+ if ( rc )
+ break;
+
+ spin_lock(&d->event_lock);
+ rc = evtchn_allocate_port(d, ipi.port);
+ if ( rc )
+ {
+ spin_unlock(&d->event_lock);
+
+ close.port = ipi.port;
+ BUG_ON(xen_hypercall_event_channel_op(EVTCHNOP_close, &close));
+ break;
+ }
+
+ evtchn_assign_vcpu(d, ipi.port, ipi.vcpu);
+ evtchn_reserve(d, ipi.port);
+ spin_unlock(&d->event_lock);
+
+ if ( __copy_to_guest(arg, &ipi, 1) )
+ rc = -EFAULT;
+
+ break;
+ }
+
+ case EVTCHNOP_unmask: {
+ struct evtchn_unmask unmask;
+
+ if ( copy_from_guest(&unmask, arg, 1) != 0 )
+ return -EFAULT;
+
+ /* Unmask is handled in L1 */
+ rc = evtchn_unmask(unmask.port);
+
+ break;
+ }
+
+ case EVTCHNOP_send: {
+ struct evtchn_send send;
+
+ if ( copy_from_guest(&send, arg, 1) != 0 )
+ return -EFAULT;
+
+ rc = xen_hypercall_event_channel_op(EVTCHNOP_send, &send);
+
+ break;
+ }
+
+ case EVTCHNOP_reset: {
+ struct evtchn_reset reset;
+
+ if ( copy_from_guest(&reset, arg, 1) != 0 )
+ return -EFAULT;
+
+ rc = xen_hypercall_event_channel_op(EVTCHNOP_reset, &reset);
+
+ break;
+ }
+
+ default:
+ /* No FIFO or PIRQ support for now */
+ rc = -EOPNOTSUPP;
+ break;
+ }
+
+ return rc;
+}
+
+void pv_shim_inject_evtchn(unsigned int port)
+{
+ if ( port_is_valid(guest, port) )
+ {
+ struct evtchn *chn = evtchn_from_port(guest, port);
+
+ evtchn_port_set_pending(guest, chn->notify_vcpu_id, chn);
+ }
+}
+
domid_t get_initial_domain_id(void)
{
uint32_t eax, ebx, ecx, edx;
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index c69f9db6db..be834c5c78 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -156,46 +156,62 @@ static void free_evtchn_bucket(struct domain *d, struct evtchn *bucket)
xfree(bucket);
}
+int evtchn_allocate_port(struct domain *d, evtchn_port_t port)
+{
+ if ( port > d->max_evtchn_port || port >= d->max_evtchns )
+ return -ENOSPC;
+
+ if ( port_is_valid(d, port) )
+ {
+ if ( evtchn_from_port(d, port)->state != ECS_FREE ||
+ evtchn_port_is_busy(d, port) )
+ return -EBUSY;
+ }
+ else
+ {
+ struct evtchn *chn;
+ struct evtchn **grp;
+
+ if ( !group_from_port(d, port) )
+ {
+ grp = xzalloc_array(struct evtchn *, BUCKETS_PER_GROUP);
+ if ( !grp )
+ return -ENOMEM;
+ group_from_port(d, port) = grp;
+ }
+
+ chn = alloc_evtchn_bucket(d, port);
+ if ( !chn )
+ return -ENOMEM;
+ bucket_from_port(d, port) = chn;
+
+ write_atomic(&d->valid_evtchns, d->valid_evtchns + EVTCHNS_PER_BUCKET);
+ }
+
+ return 0;
+}
+
static int get_free_port(struct domain *d)
{
- struct evtchn *chn;
- struct evtchn **grp;
int port;
if ( d->is_dying )
return -EINVAL;
- for ( port = 0; port_is_valid(d, port); port++ )
+ for ( port = 0; port <= d->max_evtchn_port; port++ )
{
- if ( port > d->max_evtchn_port )
- return -ENOSPC;
- if ( evtchn_from_port(d, port)->state == ECS_FREE
- && !evtchn_port_is_busy(d, port) )
- return port;
- }
+ int rc = evtchn_allocate_port(d, port);
- if ( port == d->max_evtchns || port > d->max_evtchn_port )
- return -ENOSPC;
+ if ( rc == -EBUSY )
+ continue;
- if ( !group_from_port(d, port) )
- {
- grp = xzalloc_array(struct evtchn *, BUCKETS_PER_GROUP);
- if ( !grp )
- return -ENOMEM;
- group_from_port(d, port) = grp;
+ return port;
}
- chn = alloc_evtchn_bucket(d, port);
- if ( !chn )
- return -ENOMEM;
- bucket_from_port(d, port) = chn;
-
- write_atomic(&d->valid_evtchns, d->valid_evtchns + EVTCHNS_PER_BUCKET);
-
- return port;
+ return -ENOSPC;
}
-static void free_evtchn(struct domain *d, struct evtchn *chn)
+void evtchn_free(struct domain *d, struct evtchn *chn)
{
/* Clear pending event to avoid unexpected behavior on re-bind. */
evtchn_port_clear_pending(d, chn);
@@ -345,13 +361,13 @@ static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
}
-static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
+int evtchn_bind_virq(evtchn_bind_virq_t *bind, evtchn_port_t port)
{
struct evtchn *chn;
struct vcpu *v;
struct domain *d = current->domain;
- int port, virq = bind->virq, vcpu = bind->vcpu;
- long rc = 0;
+ int virq = bind->virq, vcpu = bind->vcpu;
+ int rc = 0;
if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) )
return -EINVAL;
@@ -368,8 +384,19 @@ static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
if ( v->virq_to_evtchn[virq] != 0 )
ERROR_EXIT(-EEXIST);
- if ( (port = get_free_port(d)) < 0 )
- ERROR_EXIT(port);
+ if ( port != 0 )
+ {
+ if ( (rc = evtchn_allocate_port(d, port)) != 0 )
+ ERROR_EXIT(rc);
+ }
+ else
+ {
+ int alloc_port = get_free_port(d);
+
+ if ( alloc_port < 0 )
+ ERROR_EXIT(alloc_port);
+ port = alloc_port;
+ }
chn = evtchn_from_port(d, port);
@@ -511,7 +538,7 @@ static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
}
-static long evtchn_close(struct domain *d1, int port1, bool_t guest)
+int evtchn_close(struct domain *d1, int port1, bool guest)
{
struct domain *d2 = NULL;
struct vcpu *v;
@@ -619,7 +646,7 @@ static long evtchn_close(struct domain *d1, int port1, bool_t guest)
double_evtchn_lock(chn1, chn2);
- free_evtchn(d1, chn1);
+ evtchn_free(d1, chn1);
chn2->state = ECS_UNBOUND;
chn2->u.unbound.remote_domid = d1->domain_id;
@@ -633,7 +660,7 @@ static long evtchn_close(struct domain *d1, int port1, bool_t guest)
}
spin_lock(&chn1->lock);
- free_evtchn(d1, chn1);
+ evtchn_free(d1, chn1);
spin_unlock(&chn1->lock);
out:
@@ -839,7 +866,7 @@ static void clear_global_virq_handlers(struct domain *d)
}
}
-static long evtchn_status(evtchn_status_t *status)
+int evtchn_status(evtchn_status_t *status)
{
struct domain *d;
domid_t dom = status->dom;
@@ -1056,7 +1083,7 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
struct evtchn_bind_virq bind_virq;
if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
return -EFAULT;
- rc = evtchn_bind_virq(&bind_virq);
+ rc = evtchn_bind_virq(&bind_virq, 0);
if ( !rc && __copy_to_guest(arg, &bind_virq, 1) )
rc = -EFAULT; /* Cleaning up here would be a mess! */
break;
diff --git a/xen/drivers/char/xen_pv_console.c b/xen/drivers/char/xen_pv_console.c
index d4f0532101..948343303e 100644
--- a/xen/drivers/char/xen_pv_console.c
+++ b/xen/drivers/char/xen_pv_console.c
@@ -88,6 +88,11 @@ static void notify_daemon(void)
xen_hypercall_evtchn_send(cons_evtchn);
}
+evtchn_port_t pv_console_evtchn(void)
+{
+ return cons_evtchn;
+}
+
size_t pv_console_rx(struct cpu_user_regs *regs)
{
char c;
@@ -97,10 +102,6 @@ size_t pv_console_rx(struct cpu_user_regs *regs)
if ( !cons_ring )
return 0;
- /* TODO: move this somewhere */
- if ( !test_bit(cons_evtchn, XEN_shared_info->evtchn_pending) )
- return 0;
-
prod = ACCESS_ONCE(cons_ring->in_prod);
cons = cons_ring->in_cons;
@@ -125,8 +126,6 @@ size_t pv_console_rx(struct cpu_user_regs *regs)
ACCESS_ONCE(cons_ring->in_cons) = cons;
notify_daemon();
- clear_bit(cons_evtchn, XEN_shared_info->evtchn_pending);
-
return recv;
}
diff --git a/xen/include/asm-x86/hypercall.h b/xen/include/asm-x86/hypercall.h
index 3eb4a8db89..b9f3ecf9a3 100644
--- a/xen/include/asm-x86/hypercall.h
+++ b/xen/include/asm-x86/hypercall.h
@@ -28,6 +28,9 @@ extern const hypercall_args_t hypercall_args_table[NR_hypercalls];
void pv_hypercall(struct cpu_user_regs *regs);
void hypercall_page_initialise_ring3_kernel(void *hypercall_page);
void hypercall_page_initialise_ring1_kernel(void *hypercall_page);
+void pv_hypercall_table_replace(unsigned int hypercall, hypercall_fn_t * native,
+ hypercall_fn_t *compat);
+hypercall_fn_t *pv_get_hypercall_handler(unsigned int hypercall, bool compat);
/*
* Both do_mmuext_op() and do_mmu_update():
diff --git a/xen/include/asm-x86/pv/shim.h b/xen/include/asm-x86/pv/shim.h
index ff7c050dc6..ab656fd854 100644
--- a/xen/include/asm-x86/pv/shim.h
+++ b/xen/include/asm-x86/pv/shim.h
@@ -36,6 +36,7 @@ void pv_shim_setup_dom(struct domain *d, l4_pgentry_t *l4start,
unsigned long console_va, unsigned long vphysmap,
start_info_t *si);
void pv_shim_shutdown(uint8_t reason);
+void pv_shim_inject_evtchn(unsigned int port);
domid_t get_initial_domain_id(void);
#else
@@ -53,6 +54,10 @@ static inline void pv_shim_shutdown(uint8_t reason)
{
ASSERT_UNREACHABLE();
}
+static inline void pv_shim_inject_evtchn(unsigned int port)
+{
+ ASSERT_UNREACHABLE();
+}
static inline domid_t get_initial_domain_id(void)
{
return 0;
diff --git a/xen/include/xen/event.h b/xen/include/xen/event.h
index 87915ead69..ebb879e88d 100644
--- a/xen/include/xen/event.h
+++ b/xen/include/xen/event.h
@@ -48,6 +48,21 @@ int evtchn_send(struct domain *d, unsigned int lport);
/* Bind a local event-channel port to the specified VCPU. */
long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id);
+/* Bind a VIRQ. */
+int evtchn_bind_virq(evtchn_bind_virq_t *bind, evtchn_port_t port);
+
+/* Get the status of an event channel port. */
+int evtchn_status(evtchn_status_t *status);
+
+/* Close an event channel. */
+int evtchn_close(struct domain *d1, int port1, bool guest);
+
+/* Free an event channel. */
+void evtchn_free(struct domain *d, struct evtchn *chn);
+
+/* Allocate a specific event channel port. */
+int evtchn_allocate_port(struct domain *d, unsigned int port);
+
/* Unmask a local event-channel port. */
int evtchn_unmask(unsigned int port);
diff --git a/xen/include/xen/pv_console.h b/xen/include/xen/pv_console.h
index e578b56620..cb92539666 100644
--- a/xen/include/xen/pv_console.h
+++ b/xen/include/xen/pv_console.h
@@ -10,6 +10,7 @@ void pv_console_set_rx_handler(serial_rx_fn fn);
void pv_console_init_postirq(void);
void pv_console_puts(const char *buf);
size_t pv_console_rx(struct cpu_user_regs *regs);
+evtchn_port_t pv_console_evtchn(void);
#else
@@ -18,6 +19,11 @@ static inline void pv_console_set_rx_handler(serial_rx_fn fn) { }
static inline void pv_console_init_postirq(void) { }
static inline void pv_console_puts(const char *buf) { }
static inline size_t pv_console_rx(struct cpu_user_regs *regs) { return 0; }
+static inline evtchn_port_t pv_console_evtchn(void)
+{
+ ASSERT_UNREACHABLE();
+ return 0;
+}
#endif /* !CONFIG_XEN_GUEST */
#endif /* __XEN_PV_CONSOLE_H__ */
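
For readers tracing the EVTCHN_FORWARD macro in xen/arch/x86/pv/shim.c
above: this is its mechanical expansion for (alloc_unbound, port),
modulo formatting -- no new logic, just the macro written out:

    case EVTCHNOP_alloc_unbound: {
        struct evtchn_alloc_unbound op;

        if ( copy_from_guest(&op, arg, 1) != 0 )
            return -EFAULT;

        /* Ask L0 to perform the actual allocation... */
        rc = xen_hypercall_event_channel_op(EVTCHNOP_alloc_unbound, &op);
        if ( rc )
            break;

        /* ...then mirror the returned port in L1's port space. */
        spin_lock(&d->event_lock);
        rc = evtchn_allocate_port(d, op.port);
        if ( rc )
        {
            /* Could not mirror the port: close it in L0 again. */
            close.port = op.port;
            BUG_ON(xen_hypercall_event_channel_op(EVTCHNOP_close, &close));
        }
        else
            evtchn_reserve(d, op.port);
        spin_unlock(&d->event_lock);

        if ( !rc && __copy_to_guest(arg, &op, 1) )
            rc = -EFAULT;

        break;
    }
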
--
2.11.0