From: David Woodhouse <dwmw2@infradead.org>
To: Peter Maydell <peter.maydell@linaro.org>
Cc: qemu-devel@nongnu.org, Paolo Bonzini <pbonzini@redhat.com>,
Paul Durrant <paul@xen.org>,
Joao Martins <joao.m.martins@oracle.com>,
Ankur Arora <ankur.a.arora@oracle.com>,
Stefano Stabellini <sstabellini@kernel.org>,
vikram.garhwal@amd.com,
Anthony Perard <anthony.perard@citrix.com>,
xen-devel@lists.xenproject.org,
Juan Quintela <quintela@redhat.com>,
"Dr . David Alan Gilbert" <dgilbert@redhat.com>
Subject: [PULL 22/27] hw/xen: Add emulated implementation of XenStore operations
Date: Tue, 7 Mar 2023 18:27:02 +0000
Message-ID: <20230307182707.2298618-23-dwmw2@infradead.org>
In-Reply-To: <20230307182707.2298618-1-dwmw2@infradead.org>

From: David Woodhouse <dwmw@amazon.co.uk>

Now that we have an internal implementation of XenStore, we can populate
the xenstore_backend_ops to allow PV backends to talk to it.

Watches can't be processed with immediate callbacks, because that would
call back into the XenBus code recursively. Instead, defer them to a
QEMUBH which runs from the main loop (see the sketch below). We use one
QEMUBH per XS handle; it walks all the watches on that handle (there
shouldn't be many) and fires any which have pending events. We *could*
have done it differently, but this allows us to use the same struct
watch_event as we have for the guest side, and keeps things relatively
simple.
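
Condensed, the pattern looks like this (fragments of the code added
below; not a self-contained example):

    /* One BH per handle, created when the handle is opened: */
    h->watch_bh = aio_bh_new(qemu_get_aio_context(), be_watch_bh, h);

    /* The watch callback may fire in the middle of a XenStore
     * operation, so it only queues the event and schedules the BH: */
    w->events = g_list_append(w->events, ev);
    qemu_bh_schedule(w->h->watch_bh);

    /* be_watch_bh() later runs from the main loop, walking every
     * watch on the handle and delivering its pending events. */
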
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Paul Durrant <paul@xen.org>
---
hw/i386/kvm/xen_xenstore.c | 276 ++++++++++++++++++++++++++++++++++++-
1 file changed, 272 insertions(+), 4 deletions(-)
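
Note for reviewers: PV backends don't call the xs_be_*() functions
below directly; they go through the inline qemu_xen_xs_*() wrappers
added earlier in this series ("hw/xen: Add xenstore operations to
allow redirection to internal emulation"), which dispatch through the
registered xen_xenstore_ops. A rough sketch of the resulting call
path, using the wrapper names from that patch:

    struct qemu_xs_handle *h = qemu_xen_xs_open();     /* -> xs_be_open() */
    if (h) {
        char *path = qemu_xen_xs_get_domain_path(h, domid);
        /* ... reads/writes/watches via the other wrappers ... */
        g_free(path);
        qemu_xen_xs_close(h);                          /* -> xs_be_close() */
    }
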
diff --git a/hw/i386/kvm/xen_xenstore.c b/hw/i386/kvm/xen_xenstore.c
index 35898e9b37..bf466c71ed 100644
--- a/hw/i386/kvm/xen_xenstore.c
+++ b/hw/i386/kvm/xen_xenstore.c
@@ -49,7 +49,7 @@ struct XenXenstoreState {
/*< public >*/
XenstoreImplState *impl;
- GList *watch_events;
+ GList *watch_events; /* for the guest */
MemoryRegion xenstore_page;
struct xenstore_domain_interface *xs;
@@ -73,6 +73,8 @@ struct XenXenstoreState *xen_xenstore_singleton;
static void xen_xenstore_event(void *opaque);
static void fire_watch_cb(void *opaque, const char *path, const char *token);
+static struct xenstore_backend_ops emu_xenstore_backend_ops;
+
static void G_GNUC_PRINTF (4, 5) relpath_printf(XenXenstoreState *s,
GList *perms,
const char *relpath,
@@ -169,6 +171,8 @@ static void xen_xenstore_realize(DeviceState *dev, Error **errp)
relpath_printf(s, perms, "feature", "%s", "");
g_list_free_full(perms, g_free);
+
+ xen_xenstore_ops = &emu_xenstore_backend_ops;
}
static bool xen_xenstore_is_needed(void *opaque)
@@ -1306,6 +1310,15 @@ struct watch_event {
char *token;
};
+static void free_watch_event(struct watch_event *ev)
+{
+ if (ev) {
+ g_free(ev->path);
+ g_free(ev->token);
+ g_free(ev);
+ }
+}
+
static void queue_watch(XenXenstoreState *s, const char *path,
const char *token)
{
@@ -1352,9 +1365,7 @@ static void process_watch_events(XenXenstoreState *s)
deliver_watch(s, ev->path, ev->token);
s->watch_events = g_list_remove(s->watch_events, ev);
- g_free(ev->path);
- g_free(ev->token);
- g_free(ev);
+ free_watch_event(ev);
}
static void xen_xenstore_event(void *opaque)
@@ -1444,3 +1455,260 @@ int xen_xenstore_reset(void)
return 0;
}
+
+struct qemu_xs_handle {
+ XenstoreImplState *impl;
+ GList *watches;
+ QEMUBH *watch_bh;
+};
+
+struct qemu_xs_watch {
+ struct qemu_xs_handle *h;
+ char *path;
+ xs_watch_fn fn;
+ void *opaque;
+ GList *events;
+};
+
+static char *xs_be_get_domain_path(struct qemu_xs_handle *h, unsigned int domid)
+{
+ return g_strdup_printf("/local/domain/%u", domid);
+}
+
+static char **xs_be_directory(struct qemu_xs_handle *h, xs_transaction_t t,
+ const char *path, unsigned int *num)
+{
+ GList *items = NULL, *l;
+ unsigned int i = 0;
+ char **items_ret;
+ int err;
+
+ err = xs_impl_directory(h->impl, DOMID_QEMU, t, path, NULL, &items);
+ if (err) {
+ errno = err;
+ return NULL;
+ }
+
+ items_ret = g_new0(char *, g_list_length(items) + 1);
+ *num = 0;
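+    /* Transfer ownership of the path strings from the GList to the array. */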
+ for (l = items; l; l = l->next) {
+ items_ret[i++] = l->data;
+ (*num)++;
+ }
+ g_list_free(items);
+ return items_ret;
+}
+
+static void *xs_be_read(struct qemu_xs_handle *h, xs_transaction_t t,
+ const char *path, unsigned int *len)
+{
+ GByteArray *data = g_byte_array_new();
+ bool free_segment = false;
+ int err;
+
+ err = xs_impl_read(h->impl, DOMID_QEMU, t, path, data);
+ if (err) {
+ free_segment = true;
+ errno = err;
+ } else {
+ if (len) {
+ *len = data->len;
+ }
+        /* The xen-bus-helper code expects a NUL-terminated string! */
+ g_byte_array_append(data, (void *)"", 1);
+ }
+
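+    /* g_byte_array_free() returns the buffer, or NULL if it freed it. */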
+ return g_byte_array_free(data, free_segment);
+}
+
+static bool xs_be_write(struct qemu_xs_handle *h, xs_transaction_t t,
+ const char *path, const void *data, unsigned int len)
+{
+ GByteArray *gdata = g_byte_array_new();
+ int err;
+
+ g_byte_array_append(gdata, data, len);
+ err = xs_impl_write(h->impl, DOMID_QEMU, t, path, gdata);
+ g_byte_array_unref(gdata);
+ if (err) {
+ errno = err;
+ return false;
+ }
+ return true;
+}
+
+static bool xs_be_create(struct qemu_xs_handle *h, xs_transaction_t t,
+ unsigned int owner, unsigned int domid,
+ unsigned int perms, const char *path)
+{
+ g_autoptr(GByteArray) data = g_byte_array_new();
+ GList *perms_list = NULL;
+ int err;
+
+    /* mkdir semantics: create the node only if it doesn't already exist. */
+ err = xs_impl_read(h->impl, DOMID_QEMU, t, path, data);
+ if (err == ENOENT) {
+ err = xs_impl_write(h->impl, DOMID_QEMU, t, path, data);
+ }
+ if (err) {
+ errno = err;
+ return false;
+ }
+
+ perms_list = g_list_append(perms_list,
+ xs_perm_as_string(XS_PERM_NONE, owner));
+ perms_list = g_list_append(perms_list,
+ xs_perm_as_string(perms, domid));
+
+ err = xs_impl_set_perms(h->impl, DOMID_QEMU, t, path, perms_list);
+ g_list_free_full(perms_list, g_free);
+ if (err) {
+ errno = err;
+ return false;
+ }
+ return true;
+}
+
+static bool xs_be_destroy(struct qemu_xs_handle *h, xs_transaction_t t,
+ const char *path)
+{
+ int err = xs_impl_rm(h->impl, DOMID_QEMU, t, path);
+ if (err) {
+ errno = err;
+ return false;
+ }
+ return true;
+}
+
+static void be_watch_bh(void *_h)
+{
+ struct qemu_xs_handle *h = _h;
+ GList *l;
+
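+    /* Deliver and free any events queued on each watch of this handle. */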
+ for (l = h->watches; l; l = l->next) {
+ struct qemu_xs_watch *w = l->data;
+
+ while (w->events) {
+ struct watch_event *ev = w->events->data;
+
+ w->fn(w->opaque, ev->path);
+
+ w->events = g_list_remove(w->events, ev);
+ free_watch_event(ev);
+ }
+ }
+}
+
+static void xs_be_watch_cb(void *opaque, const char *path, const char *token)
+{
+ struct watch_event *ev = g_new0(struct watch_event, 1);
+ struct qemu_xs_watch *w = opaque;
+
+ /* We don't care about the token */
+ ev->path = g_strdup(path);
+ w->events = g_list_append(w->events, ev);
+
+ qemu_bh_schedule(w->h->watch_bh);
+}
+
+static struct qemu_xs_watch *xs_be_watch(struct qemu_xs_handle *h,
+ const char *path, xs_watch_fn fn,
+ void *opaque)
+{
+ struct qemu_xs_watch *w = g_new0(struct qemu_xs_watch, 1);
+ int err;
+
+ w->h = h;
+ w->fn = fn;
+ w->opaque = opaque;
+
+ err = xs_impl_watch(h->impl, DOMID_QEMU, path, NULL, xs_be_watch_cb, w);
+ if (err) {
+ errno = err;
+ g_free(w);
+ return NULL;
+ }
+
+ w->path = g_strdup(path);
+ h->watches = g_list_append(h->watches, w);
+ return w;
+}
+
+static void xs_be_unwatch(struct qemu_xs_handle *h, struct qemu_xs_watch *w)
+{
+ xs_impl_unwatch(h->impl, DOMID_QEMU, w->path, NULL, xs_be_watch_cb, w);
+
+ h->watches = g_list_remove(h->watches, w);
+ g_list_free_full(w->events, (GDestroyNotify)free_watch_event);
+ g_free(w->path);
+ g_free(w);
+}
+
+static xs_transaction_t xs_be_transaction_start(struct qemu_xs_handle *h)
+{
+ unsigned int new_tx = XBT_NULL;
+ int err = xs_impl_transaction_start(h->impl, DOMID_QEMU, &new_tx);
+ if (err) {
+ errno = err;
+ return XBT_NULL;
+ }
+ return new_tx;
+}
+
+static bool xs_be_transaction_end(struct qemu_xs_handle *h, xs_transaction_t t,
+ bool abort)
+{
+ int err = xs_impl_transaction_end(h->impl, DOMID_QEMU, t, !abort);
+ if (err) {
+ errno = err;
+ return false;
+ }
+ return true;
+}
+
+static struct qemu_xs_handle *xs_be_open(void)
+{
+ XenXenstoreState *s = xen_xenstore_singleton;
+ struct qemu_xs_handle *h;
+
+    if (!s || !s->impl) {
+        errno = ENOSYS;
+        return NULL;
+    }
+
+ h = g_new0(struct qemu_xs_handle, 1);
+ h->impl = s->impl;
+
+ h->watch_bh = aio_bh_new(qemu_get_aio_context(), be_watch_bh, h);
+
+ return h;
+}
+
+static void xs_be_close(struct qemu_xs_handle *h)
+{
+ while (h->watches) {
+ struct qemu_xs_watch *w = h->watches->data;
+ xs_be_unwatch(h, w);
+ }
+
+ qemu_bh_delete(h->watch_bh);
+ g_free(h);
+}
+
+static struct xenstore_backend_ops emu_xenstore_backend_ops = {
+ .open = xs_be_open,
+ .close = xs_be_close,
+ .get_domain_path = xs_be_get_domain_path,
+ .directory = xs_be_directory,
+ .read = xs_be_read,
+ .write = xs_be_write,
+ .create = xs_be_create,
+ .destroy = xs_be_destroy,
+ .watch = xs_be_watch,
+ .unwatch = xs_be_unwatch,
+ .transaction_start = xs_be_transaction_start,
+ .transaction_end = xs_be_transaction_end,
+};
--
2.39.0
Thread overview: 54+ messages
2023-03-07 18:26 [PULL 00/27] Enable PV backends with Xen/KVM emulation David Woodhouse
2023-03-07 18:26 ` [PULL 01/27] hw/xen: Add xenstore wire implementation and implementation stubs David Woodhouse
2023-03-07 18:26 ` [PULL 02/27] hw/xen: Add basic XenStore tree walk and write/read/directory support David Woodhouse
2023-03-07 18:26 ` [PULL 03/27] hw/xen: Implement XenStore watches David Woodhouse
2023-03-07 18:26 ` [PULL 04/27] hw/xen: Implement XenStore transactions David Woodhouse
2023-03-07 18:26 ` [PULL 05/27] hw/xen: Watches on " David Woodhouse
2023-05-02 17:08 ` Peter Maydell
2023-06-02 17:06 ` Peter Maydell
2023-06-20 12:19 ` Peter Maydell
2023-06-20 17:57 ` David Woodhouse
2023-06-20 17:58 ` [PATCH] hw/xen: Clarify (lack of) error handling in transaction_commit() David Woodhouse
2023-07-26 9:23 ` Paul Durrant
2023-03-07 18:26 ` [PULL 06/27] hw/xen: Implement XenStore permissions David Woodhouse
2023-03-07 18:26 ` [PULL 07/27] hw/xen: Implement core serialize/deserialize methods for xenstore_impl David Woodhouse
2023-03-07 18:26 ` [PULL 08/27] hw/xen: Create initial XenStore nodes David Woodhouse
2023-03-07 18:26 ` [PULL 09/27] hw/xen: Add evtchn operations to allow redirection to internal emulation David Woodhouse
2023-03-07 18:26 ` [PULL 10/27] hw/xen: Add gnttab " David Woodhouse
2023-03-07 18:26 ` [PULL 11/27] hw/xen: Pass grant ref to gnttab unmap operation David Woodhouse
2023-03-07 18:26 ` [PULL 12/27] hw/xen: Add foreignmem operations to allow redirection to internal emulation David Woodhouse
2023-03-07 18:26 ` [PULL 13/27] hw/xen: Add xenstore " David Woodhouse
2023-03-12 19:19 ` Jason Andryuk
2023-03-13 8:45 ` David Woodhouse
2023-03-13 23:17 ` Jason Andryuk
2023-03-14 8:32 ` David Woodhouse
2023-03-14 8:35 ` [PATCH] accel/xen: Fix DM state change notification in dm_restrict mode David Woodhouse
2023-03-14 9:05 ` Paul Durrant
2023-03-14 10:49 ` Jason Andryuk
2023-04-04 17:35 ` [PULL 13/27] hw/xen: Add xenstore operations to allow redirection to internal emulation Peter Maydell
2023-04-04 17:45 ` David Woodhouse
2023-04-04 17:45 ` Peter Maydell
2023-04-04 18:21 ` David Woodhouse
2023-03-07 18:26 ` [PULL 14/27] hw/xen: Move xenstore_store_pv_console_info to xen_console.c David Woodhouse
2023-04-06 15:18 ` Peter Maydell
2023-03-07 18:26 ` [PULL 15/27] hw/xen: Use XEN_PAGE_SIZE in PV backend drivers David Woodhouse
2023-03-07 18:26 ` [PULL 16/27] hw/xen: Rename xen_common.h to xen_native.h David Woodhouse
2023-03-07 18:26 ` [PULL 17/27] hw/xen: Build PV backend drivers for CONFIG_XEN_BUS David Woodhouse
2023-03-07 18:26 ` [PULL 18/27] hw/xen: Avoid crash when backend watch fires too early David Woodhouse
2023-03-07 18:26 ` [PULL 19/27] hw/xen: Only advertise ring-page-order for xen-block if gnttab supports it David Woodhouse
2023-03-07 18:27 ` [PULL 20/27] hw/xen: Hook up emulated implementation for event channel operations David Woodhouse
2023-03-07 18:27 ` [PULL 21/27] hw/xen: Add emulated implementation of grant table operations David Woodhouse
2023-03-07 18:27 ` David Woodhouse [this message]
2023-04-11 17:47 ` [PULL 22/27] hw/xen: Add emulated implementation of XenStore operations Peter Maydell
2023-04-11 18:07 ` Peter Maydell
2023-04-12 18:22 ` David Woodhouse
2023-04-12 18:53 ` Peter Maydell
2023-03-07 18:27 ` [PULL 23/27] hw/xen: Map guest XENSTORE_PFN grant in emulated Xenstore David Woodhouse
2023-03-07 18:27 ` [PULL 24/27] hw/xen: Implement soft reset for emulated gnttab David Woodhouse
2023-03-07 18:27 ` [PULL 25/27] i386/xen: Initialize Xen backends from pc_basic_device_init() for emulation David Woodhouse
2023-03-07 18:27 ` [PULL 26/27] MAINTAINERS: Add entry for Xen on KVM emulation David Woodhouse
2023-03-07 18:27 ` [PULL 27/27] docs: Update Xen-on-KVM documentation for PV disk support David Woodhouse
2023-03-07 20:20 ` [PULL 00/27] Enable PV backends with Xen/KVM emulation Philippe Mathieu-Daudé
2023-03-07 22:34 ` David Woodhouse
2023-03-07 23:26 ` Philippe Mathieu-Daudé
2023-03-09 15:18 ` Peter Maydell