From: Ian Campbell <ian.campbell@citrix.com>
To: ian.jackson@eu.citrix.com, wei.liu2@citrix.com, xen-devel@lists.xen.org
Cc: Ian Campbell <ian.campbell@citrix.com>,
qemu-devel@nongnu.org, stefano.stabellini@eu.citrix.com
Subject: [Qemu-devel] [PATCH QEMU-XEN v4 2/9] xen: Switch to libxenevtchn interface for compat shims.
Date: Wed, 21 Oct 2015 16:23:51 +0100
Message-ID: <1445441038-25903-3-git-send-email-ian.campbell@citrix.com>
In-Reply-To: <1445441038-25903-1-git-send-email-ian.campbell@citrix.com>
In Xen 4.7 we are refactoring parts of libxenctrl into a number of
separate libraries which will provide backward and forward API and ABI
compatibility.
One such library will be libxenevtchn which provides access to event
channels.
In preparation for this, switch the compatibility layer in xen_common.h
(which supports building with older versions of Xen) to use what will
be the new library API. This means that the evtchn shim will disappear
for versions of Xen which include libxenevtchn.
To simplify things for the <= 4.0.0 support we wrap the int fd in a
malloc(sizeof(int)) allocation so that the handle is always a pointer.
This avoids typedef headaches and removes the need for
XC_HANDLER_INITIAL_VALUE etc. for these interfaces.
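For illustration, the essence of the <= 4.0.0 shim looks like this (the
complete version is in the xen_common.h hunk below):

    typedef int xenevtchn_handle;

    static inline xenevtchn_handle *xenevtchn_open(void *logger,
                                                   unsigned int open_flags)
    {
        xenevtchn_handle *h = malloc(sizeof(*h));

        if (!h) {
            return NULL;
        }
        *h = xc_evtchn_open();  /* the old interface returns an int fd */
        if (*h == -1) {         /* open failed: free the wrapper, return NULL */
            free(h);
            h = NULL;
        }
        return h;
    }

    static inline int xenevtchn_close(xenevtchn_handle *h)
    {
        int rc = xc_evtchn_close(*h);

        free(h);                /* release the malloc'd wrapper */
        return rc;
    }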
Build tested with 4.0 and 4.5.
Note that this patch does not add any support for actually using
libxenevtchn, it just adjusts the existing shims.
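The caller-side effect is that the open/error-check idiom becomes a
plain pointer test, e.g. (condensed from the xen_backend.c hunk below):

    /* before: integer handle, sentinel value check */
    xendev->evtchndev = xen_xc_evtchn_open(NULL, 0);
    if (xendev->evtchndev == XC_HANDLER_INITIAL_VALUE) {
        ...
    }

    /* after: the handle is always a pointer, so a plain NULL test works */
    xendev->evtchndev = xenevtchn_open(NULL, 0);
    if (xendev->evtchndev == NULL) {
        ...
    }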
Note that xc_evtchn_alloc_unbound functionality remains in libxenctrl,
since that functionality is not exposed by /dev/xen/evtchn.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
v4: Ran checkpatch, fixed all errors
Allocate correct size for handle (i.e. not size of the ptr)
---
hw/xen/xen_backend.c | 31 ++++++++++++++++---------------
include/hw/xen/xen_backend.h | 2 +-
include/hw/xen/xen_common.h | 44 ++++++++++++++++++++++++++++++++++----------
xen-hvm.c | 25 +++++++++++++------------
4 files changed, 64 insertions(+), 38 deletions(-)
diff --git a/hw/xen/xen_backend.c b/hw/xen/xen_backend.c
index b2cb22b..342ec9b 100644
--- a/hw/xen/xen_backend.c
+++ b/hw/xen/xen_backend.c
@@ -243,19 +243,19 @@ static struct XenDevice *xen_be_get_xendev(const char *type, int dom, int dev,
xendev->debug = debug;
xendev->local_port = -1;
- xendev->evtchndev = xen_xc_evtchn_open(NULL, 0);
- if (xendev->evtchndev == XC_HANDLER_INITIAL_VALUE) {
+ xendev->evtchndev = xenevtchn_open(NULL, 0);
+ if (xendev->evtchndev == NULL) {
xen_be_printf(NULL, 0, "can't open evtchn device\n");
g_free(xendev);
return NULL;
}
- fcntl(xc_evtchn_fd(xendev->evtchndev), F_SETFD, FD_CLOEXEC);
+ fcntl(xenevtchn_fd(xendev->evtchndev), F_SETFD, FD_CLOEXEC);
if (ops->flags & DEVOPS_FLAG_NEED_GNTDEV) {
xendev->gnttabdev = xen_xc_gnttab_open(NULL, 0);
if (xendev->gnttabdev == XC_HANDLER_INITIAL_VALUE) {
xen_be_printf(NULL, 0, "can't open gnttab device\n");
- xc_evtchn_close(xendev->evtchndev);
+ xenevtchn_close(xendev->evtchndev);
g_free(xendev);
return NULL;
}
@@ -306,8 +306,8 @@ static struct XenDevice *xen_be_del_xendev(int dom, int dev)
g_free(xendev->fe);
}
- if (xendev->evtchndev != XC_HANDLER_INITIAL_VALUE) {
- xc_evtchn_close(xendev->evtchndev);
+ if (xendev->evtchndev != NULL) {
+ xenevtchn_close(xendev->evtchndev);
}
if (xendev->gnttabdev != XC_HANDLER_INITIAL_VALUE) {
xc_gnttab_close(xendev->gnttabdev);
@@ -691,13 +691,14 @@ static void xen_be_evtchn_event(void *opaque)
struct XenDevice *xendev = opaque;
evtchn_port_t port;
- port = xc_evtchn_pending(xendev->evtchndev);
+ port = xenevtchn_pending(xendev->evtchndev);
if (port != xendev->local_port) {
- xen_be_printf(xendev, 0, "xc_evtchn_pending returned %d (expected %d)\n",
+ xen_be_printf(xendev, 0,
+ "xenevtchn_pending returned %d (expected %d)\n",
port, xendev->local_port);
return;
}
- xc_evtchn_unmask(xendev->evtchndev, port);
+ xenevtchn_unmask(xendev->evtchndev, port);
if (xendev->ops->event) {
xendev->ops->event(xendev);
@@ -742,14 +743,14 @@ int xen_be_bind_evtchn(struct XenDevice *xendev)
if (xendev->local_port != -1) {
return 0;
}
- xendev->local_port = xc_evtchn_bind_interdomain
+ xendev->local_port = xenevtchn_bind_interdomain
(xendev->evtchndev, xendev->dom, xendev->remote_port);
if (xendev->local_port == -1) {
- xen_be_printf(xendev, 0, "xc_evtchn_bind_interdomain failed\n");
+ xen_be_printf(xendev, 0, "xenevtchn_bind_interdomain failed\n");
return -1;
}
xen_be_printf(xendev, 2, "bind evtchn port %d\n", xendev->local_port);
- qemu_set_fd_handler(xc_evtchn_fd(xendev->evtchndev),
+ qemu_set_fd_handler(xenevtchn_fd(xendev->evtchndev),
xen_be_evtchn_event, NULL, xendev);
return 0;
}
@@ -759,15 +760,15 @@ void xen_be_unbind_evtchn(struct XenDevice *xendev)
if (xendev->local_port == -1) {
return;
}
- qemu_set_fd_handler(xc_evtchn_fd(xendev->evtchndev), NULL, NULL, NULL);
- xc_evtchn_unbind(xendev->evtchndev, xendev->local_port);
+ qemu_set_fd_handler(xenevtchn_fd(xendev->evtchndev), NULL, NULL, NULL);
+ xenevtchn_unbind(xendev->evtchndev, xendev->local_port);
xen_be_printf(xendev, 2, "unbind evtchn port %d\n", xendev->local_port);
xendev->local_port = -1;
}
int xen_be_send_notify(struct XenDevice *xendev)
{
- return xc_evtchn_notify(xendev->evtchndev, xendev->local_port);
+ return xenevtchn_notify(xendev->evtchndev, xendev->local_port);
}
/*
diff --git a/include/hw/xen/xen_backend.h b/include/hw/xen/xen_backend.h
index 3b4125e..a90314f 100644
--- a/include/hw/xen/xen_backend.h
+++ b/include/hw/xen/xen_backend.h
@@ -46,7 +46,7 @@ struct XenDevice {
int remote_port;
int local_port;
- XenEvtchn evtchndev;
+ xenevtchn_handle *evtchndev;
XenGnttab gnttabdev;
struct XenDevOps *ops;
diff --git a/include/hw/xen/xen_common.h b/include/hw/xen/xen_common.h
index 5923290..4dc2ee6 100644
--- a/include/hw/xen/xen_common.h
+++ b/include/hw/xen/xen_common.h
@@ -39,17 +39,38 @@ static inline void *xc_map_foreign_bulk(int xc_handle, uint32_t dom, int prot,
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 410
typedef int XenXC;
-typedef int XenEvtchn;
+typedef int xenevtchn_handle;
typedef int XenGnttab;
# define XC_INTERFACE_FMT "%i"
# define XC_HANDLER_INITIAL_VALUE -1
-static inline XenEvtchn xen_xc_evtchn_open(void *logger,
- unsigned int open_flags)
+static inline xenevtchn_handle *xenevtchn_open(void *logger,
+ unsigned int open_flags)
{
- return xc_evtchn_open();
+ xenevtchn_handle *h = malloc(sizeof(*h));
+ if (!h) {
+ return NULL;
+ }
+ *h = xc_evtchn_open();
+ if (*h == -1) {
+ free(h);
+ h = NULL;
+ }
+ return h;
}
+static inline int xenevtchn_close(xenevtchn_handle *h)
+{
+ int rc = xc_evtchn_close(*h);
+ free(h);
+ return rc;
+}
+#define xenevtchn_fd(h) xc_evtchn_fd(*h)
+#define xenevtchn_pending(h) xc_evtchn_pending(*h)
+#define xenevtchn_notify(h, p) xc_evtchn_notify(*h, p)
+#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(*h, d, p)
+#define xenevtchn_unmask(h, p) xc_evtchn_unmask(*h, p)
+#define xenevtchn_unbind(h, p) xc_evtchn_unbind(*h, p)
static inline XenGnttab xen_xc_gnttab_open(void *logger,
unsigned int open_flags)
@@ -108,17 +129,20 @@ static inline void xs_close(struct xs_handle *xsh)
#else
typedef xc_interface *XenXC;
-typedef xc_evtchn *XenEvtchn;
+typedef xc_evtchn xenevtchn_handle;
typedef xc_gnttab *XenGnttab;
# define XC_INTERFACE_FMT "%p"
# define XC_HANDLER_INITIAL_VALUE NULL
-static inline XenEvtchn xen_xc_evtchn_open(void *logger,
- unsigned int open_flags)
-{
- return xc_evtchn_open(logger, open_flags);
-}
+#define xenevtchn_open(l, f) xc_evtchn_open(l, f)
+#define xenevtchn_close(h) xc_evtchn_close(h)
+#define xenevtchn_fd(h) xc_evtchn_fd(h)
+#define xenevtchn_pending(h) xc_evtchn_pending(h)
+#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
+#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
+#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
+#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
static inline XenGnttab xen_xc_gnttab_open(void *logger,
unsigned int open_flags)
diff --git a/xen-hvm.c b/xen-hvm.c
index 3a7fd58..4470d58 100644
--- a/xen-hvm.c
+++ b/xen-hvm.c
@@ -109,7 +109,7 @@ typedef struct XenIOState {
/* evtchn local port for buffered io */
evtchn_port_t bufioreq_local_port;
/* the evtchn fd for polling */
- XenEvtchn xce_handle;
+ xenevtchn_handle *xce_handle;
/* which vcpu we are serving */
int send_vcpu;
@@ -709,7 +709,7 @@ static ioreq_t *cpu_get_ioreq(XenIOState *state)
int i;
evtchn_port_t port;
- port = xc_evtchn_pending(state->xce_handle);
+ port = xenevtchn_pending(state->xce_handle);
if (port == state->bufioreq_local_port) {
timer_mod(state->buffered_io_timer,
BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
@@ -728,7 +728,7 @@ static ioreq_t *cpu_get_ioreq(XenIOState *state)
}
/* unmask the wanted port again */
- xc_evtchn_unmask(state->xce_handle, port);
+ xenevtchn_unmask(state->xce_handle, port);
/* get the io packet from shared memory */
state->send_vcpu = i;
@@ -1014,7 +1014,7 @@ static void handle_buffered_io(void *opaque)
BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
} else {
timer_del(state->buffered_io_timer);
- xc_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
+ xenevtchn_unmask(state->xce_handle, state->bufioreq_local_port);
}
}
@@ -1058,7 +1058,8 @@ static void cpu_handle_ioreq(void *opaque)
}
req->state = STATE_IORESP_READY;
- xc_evtchn_notify(state->xce_handle, state->ioreq_local_port[state->send_vcpu]);
+ xenevtchn_notify(state->xce_handle,
+ state->ioreq_local_port[state->send_vcpu]);
}
}
@@ -1066,8 +1067,8 @@ static void xen_main_loop_prepare(XenIOState *state)
{
int evtchn_fd = -1;
- if (state->xce_handle != XC_HANDLER_INITIAL_VALUE) {
- evtchn_fd = xc_evtchn_fd(state->xce_handle);
+ if (state->xce_handle != NULL) {
+ evtchn_fd = xenevtchn_fd(state->xce_handle);
}
state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io,
@@ -1105,7 +1106,7 @@ static void xen_exit_notifier(Notifier *n, void *data)
{
XenIOState *state = container_of(n, XenIOState, exit);
- xc_evtchn_close(state->xce_handle);
+ xenevtchn_close(state->xce_handle);
xs_daemon_close(state->xenstore);
}
@@ -1174,8 +1175,8 @@ int xen_hvm_init(ram_addr_t *below_4g_mem_size, ram_addr_t *above_4g_mem_size,
state = g_malloc0(sizeof (XenIOState));
- state->xce_handle = xen_xc_evtchn_open(NULL, 0);
- if (state->xce_handle == XC_HANDLER_INITIAL_VALUE) {
+ state->xce_handle = xenevtchn_open(NULL, 0);
+ if (state->xce_handle == NULL) {
perror("xen: event channel open");
return -1;
}
@@ -1255,7 +1256,7 @@ int xen_hvm_init(ram_addr_t *below_4g_mem_size, ram_addr_t *above_4g_mem_size,
/* FIXME: how about if we overflow the page here? */
for (i = 0; i < max_cpus; i++) {
- rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
+ rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
xen_vcpu_eport(state->shared_page, i));
if (rc == -1) {
fprintf(stderr, "shared evtchn %d bind error %d\n", i, errno);
@@ -1264,7 +1265,7 @@ int xen_hvm_init(ram_addr_t *below_4g_mem_size, ram_addr_t *above_4g_mem_size,
state->ioreq_local_port[i] = rc;
}
- rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
+ rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
bufioreq_evtchn);
if (rc == -1) {
fprintf(stderr, "buffered evtchn bind error %d\n", errno);
--
2.1.4