From: David Woodhouse <dwmw2@infradead.org>
To: qemu-devel@nongnu.org
Cc: Paolo Bonzini <pbonzini@redhat.com>, Paul Durrant <paul@xen.org>,
	Joao Martins <joao.m.martins@oracle.com>,
	Ankur Arora <ankur.a.arora@oracle.com>,
	Stefano Stabellini <sstabellini@kernel.org>,
	vikram.garhwal@amd.com,
	Anthony Perard <anthony.perard@citrix.com>,
	xen-devel@lists.xenproject.org
Subject: [RFC PATCH v1 22/25] hw/xen: Add emulated implementation of XenStore operations
Date: Thu,  2 Mar 2023 15:34:32 +0000
Message-ID: <20230302153435.1170111-23-dwmw2@infradead.org>
In-Reply-To: <20230302153435.1170111-1-dwmw2@infradead.org>

From: David Woodhouse <dwmw@amazon.co.uk>

Now that we have an internal implementation of XenStore, we can populate
the xenstore_backend_ops to allow PV backends to talk to it.

Watches can't be processed with immediate callbacks because that would
call back into XenBus code recursively. Defer them to a QEMUBH to be run
from the main loop instead. We use one QEMUBH per XS handle, and when it
runs it walks all the watches on that handle (there shouldn't be many)
and fires any that have pending events. We *could* have done it
differently, but this allows us to reuse the same struct watch_event as
we have for the guest side, and keeps things relatively simple.
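
To make the flow concrete, here is a condensed view of that deferral path,
lifted from the code added below and shown together purely for illustration:

    /* Implementation callback: only queue the event and schedule the
     * per-handle BH; never call back into XenBus code from here. */
    static void xs_be_watch_cb(void *opaque, const char *path, const char *token)
    {
        struct watch_event *ev = g_new0(struct watch_event, 1);
        struct qemu_xs_watch *w = opaque;

        ev->path = g_strdup(path);
        w->events = g_list_append(w->events, ev);
        qemu_bh_schedule(w->h->watch_bh);
    }

    /* BH handler: runs from the main loop, walks every watch on the
     * handle and delivers any pending events to the XenBus callback. */
    static void be_watch_bh(void *_h)
    {
        struct qemu_xs_handle *h = _h;
        GList *l;

        for (l = h->watches; l; l = l->next) {
            struct qemu_xs_watch *w = l->data;

            while (w->events) {
                struct watch_event *ev = w->events->data;

                w->fn(w->opaque, ev->path);
                w->events = g_list_remove(w->events, ev);
                free_watch_event(ev);
            }
        }
    }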

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
---
 hw/i386/kvm/xen_xenstore.c | 273 ++++++++++++++++++++++++++++++++++++-
 1 file changed, 269 insertions(+), 4 deletions(-)

diff --git a/hw/i386/kvm/xen_xenstore.c b/hw/i386/kvm/xen_xenstore.c
index bab40d1a04..028f80499e 100644
--- a/hw/i386/kvm/xen_xenstore.c
+++ b/hw/i386/kvm/xen_xenstore.c
@@ -49,7 +49,7 @@ struct XenXenstoreState {
     /*< public >*/
 
     XenstoreImplState *impl;
-    GList *watch_events;
+    GList *watch_events; /* for the guest */
 
     MemoryRegion xenstore_page;
     struct xenstore_domain_interface *xs;
@@ -73,6 +73,8 @@ struct XenXenstoreState *xen_xenstore_singleton;
 static void xen_xenstore_event(void *opaque);
 static void fire_watch_cb(void *opaque, const char *path, const char *token);
 
+static struct xenstore_backend_ops emu_xenstore_backend_ops;
+
 static void G_GNUC_PRINTF (4, 5) relpath_printf(XenXenstoreState *s,
                                                 GList *perms,
                                                 const char *relpath,
@@ -169,6 +171,8 @@ static void xen_xenstore_realize(DeviceState *dev, Error **errp)
     relpath_printf(s, perms, "feature", "%s", "");
 
     g_list_free_full(perms, g_free);
+
+    xen_xenstore_ops = &emu_xenstore_backend_ops;
 }
 
 static bool xen_xenstore_is_needed(void *opaque)
@@ -1305,6 +1309,15 @@ struct watch_event {
     char *token;
 };
 
+static void free_watch_event(struct watch_event *ev)
+{
+    if (ev) {
+        g_free(ev->path);
+        g_free(ev->token);
+        g_free(ev);
+    }
+}
+
 static void queue_watch(XenXenstoreState *s, const char *path,
                         const char *token)
 {
@@ -1351,9 +1364,7 @@ static void process_watch_events(XenXenstoreState *s)
     deliver_watch(s, ev->path, ev->token);
 
     s->watch_events = g_list_remove(s->watch_events, ev);
-    g_free(ev->path);
-    g_free(ev->token);
-    g_free(ev);
+    free_watch_event(ev);
 }
 
 static void xen_xenstore_event(void *opaque)
@@ -1443,3 +1454,257 @@ int xen_xenstore_reset(void)
 
     return 0;
 }
+
+struct qemu_xs_handle {
+    XenstoreImplState *impl;
+    GList *watches;
+    QEMUBH *watch_bh;
+};
+
+struct qemu_xs_watch {
+    struct qemu_xs_handle *h;
+    char *path;
+    xs_watch_fn fn;
+    void *opaque;
+    GList *events;
+};
+
+static char *xs_be_get_domain_path(struct qemu_xs_handle *h, unsigned int domid)
+{
+    return g_strdup_printf("/local/domain/%u", domid);
+}
+
+static char **xs_be_directory(struct qemu_xs_handle *h, xs_transaction_t t,
+                              const char *path, unsigned int *num)
+{
+    GList *items = NULL, *l;
+    unsigned int i = 0;
+    char **items_ret;
+    int err;
+
+    err = xs_impl_directory(h->impl, DOMID_QEMU, t, path, NULL, &items);
+    if (err) {
+        errno = err;
+        return NULL;
+    }
+
+    items_ret = g_new0(char *, g_list_length(items) + 1);
+    *num = 0;
+    for (l = items; l; l = l->next) {
+        items_ret[i++] = l->data;
+        (*num)++;
+    }
+    g_list_free(items);
+    return items_ret;
+}
+
+static void *xs_be_read(struct qemu_xs_handle *h, xs_transaction_t t,
+                        const char *path, unsigned int *len)
+{
+    GByteArray *data = g_byte_array_new();
+    bool free_segment = false;
+    int err;
+
+    err = xs_impl_read(h->impl, DOMID_QEMU, t, path, data);
+    if (err) {
+        free_segment = true;
+        errno = err;
+    } else {
+        if (len) {
+            *len = data->len;
+        }
+        /* The xen-bus-helper code expects to get a NUL-terminated string! */
+        g_byte_array_append(data, (void *)"", 1);
+    }
+
+    return g_byte_array_free(data, free_segment);
+}
+
+static bool xs_be_write(struct qemu_xs_handle *h, xs_transaction_t t,
+                        const char *path, const void *data, unsigned int len)
+{
+    GByteArray *gdata = g_byte_array_new();
+    int err;
+
+    g_byte_array_append(gdata, data, len);
+    err = xs_impl_write(h->impl, DOMID_QEMU, t, path, gdata);
+    g_byte_array_unref(gdata);
+    if (err) {
+        errno = err;
+        return false;
+    }
+    return true;
+}
+
+static bool xs_be_create(struct qemu_xs_handle *h, xs_transaction_t t,
+                         unsigned int owner, unsigned int domid,
+                         unsigned int perms, const char *path)
+{
+    g_autoptr(GByteArray) data = g_byte_array_new();
+    GList *perms_list = NULL;
+    int err;
+
+    /* mkdir semantics: create the node only if it doesn't already exist */
+    err = xs_impl_read(h->impl, DOMID_QEMU, t, path, data);
+    if (err == ENOENT) {
+        err = xs_impl_write(h->impl, DOMID_QEMU, t, path, data);
+    }
+    if (err) {
+        errno = err;
+        return false;
+    }
+
+    perms_list = g_list_append(perms_list,
+                               xs_perm_as_string(XS_PERM_NONE, owner));
+    perms_list = g_list_append(perms_list,
+                               xs_perm_as_string(perms, domid));
+
+    err = xs_impl_set_perms(h->impl, DOMID_QEMU, t, path, perms_list);
+    g_list_free_full(perms_list, g_free);
+    if (err) {
+        errno = err;
+        return false;
+    }
+    return true;
+}
+
+static bool xs_be_destroy(struct qemu_xs_handle *h, xs_transaction_t t,
+                          const char *path)
+{
+    int err = xs_impl_rm(h->impl, DOMID_QEMU, t, path);
+    if (err) {
+        errno = err;
+        return false;
+    }
+    return true;
+}
+
+static void be_watch_bh(void *_h)
+{
+    struct qemu_xs_handle *h = _h;
+    GList *l;
+
+    for (l = h->watches; l; l = l->next) {
+        struct qemu_xs_watch *w = l->data;
+
+        while (w->events) {
+            struct watch_event *ev = w->events->data;
+
+            w->fn(w->opaque, ev->path);
+
+            w->events = g_list_remove(w->events, ev);
+            free_watch_event(ev);
+        }
+    }
+}
+
+static void xs_be_watch_cb(void *opaque, const char *path, const char *token)
+{
+    struct watch_event *ev = g_new0(struct watch_event, 1);
+    struct qemu_xs_watch *w = opaque;
+
+    /* We don't care about the token */
+    ev->path = g_strdup(path);
+    w->events = g_list_append(w->events, ev);
+
+    qemu_bh_schedule(w->h->watch_bh);
+}
+
+static struct qemu_xs_watch *xs_be_watch(struct qemu_xs_handle *h,
+                                         const char *path, xs_watch_fn fn,
+                                         void *opaque)
+{
+    struct qemu_xs_watch *w = g_new0(struct qemu_xs_watch, 1);
+    int err;
+
+    w->h = h;
+    w->fn = fn;
+    w->opaque = opaque;
+
+    err = xs_impl_watch(h->impl, DOMID_QEMU, path, NULL, xs_be_watch_cb, w);
+    if (err) {
+        errno = err;
+        g_free(w);
+        return NULL;
+    }
+
+    w->path = g_strdup(path);
+    h->watches = g_list_append(h->watches, w);
+    return w;
+}
+
+static void xs_be_unwatch(struct qemu_xs_handle *h, struct qemu_xs_watch *w)
+{
+    xs_impl_unwatch(h->impl, DOMID_QEMU, w->path, NULL, xs_be_watch_cb, w);
+
+    h->watches = g_list_remove(h->watches, w);
+    g_list_free_full(w->events, (GDestroyNotify)free_watch_event);
+    g_free(w->path);
+    g_free(w);
+}
+
+static xs_transaction_t xs_be_transaction_start(struct qemu_xs_handle *h)
+{
+    unsigned int new_tx = XBT_NULL;
+    int err = xs_impl_transaction_start(h->impl, DOMID_QEMU, &new_tx);
+    if (err) {
+        errno = err;
+        return XBT_NULL;
+    }
+    return new_tx;
+}
+
+static bool xs_be_transaction_end(struct qemu_xs_handle *h, xs_transaction_t t,
+                                  bool abort)
+{
+    int err = xs_impl_transaction_end(h->impl, DOMID_QEMU, t, !abort);
+    if (err) {
+        errno = err;
+        return false;
+    }
+    return true;
+}
+
+static struct qemu_xs_handle *xs_be_open(void)
+{
+    XenXenstoreState *s = xen_xenstore_singleton;
+    struct qemu_xs_handle *h;
+
+    if (!s || !s->impl) {
+        errno = ENOSYS;
+        return NULL;
+    }
+
+    h = g_new0(struct qemu_xs_handle, 1);
+    h->impl = s->impl;
+
+    h->watch_bh = aio_bh_new(qemu_get_aio_context(), be_watch_bh, h);
+
+    return h;
+}
+
+static void xs_be_close(struct qemu_xs_handle *h)
+{
+    while (h->watches) {
+        struct qemu_xs_watch *w = h->watches->data;
+        xs_be_unwatch(h, w);
+    }
+
+    qemu_bh_delete(h->watch_bh);
+    g_free(h);
+}
+
+static struct xenstore_backend_ops emu_xenstore_backend_ops = {
+    .open = xs_be_open,
+    .close = xs_be_close,
+    .get_domain_path = xs_be_get_domain_path,
+    .directory = xs_be_directory,
+    .read = xs_be_read,
+    .write = xs_be_write,
+    .create = xs_be_create,
+    .destroy = xs_be_destroy,
+    .watch = xs_be_watch,
+    .unwatch = xs_be_unwatch,
+    .transaction_start = xs_be_transaction_start,
+    .transaction_end = xs_be_transaction_end,
+};
-- 
2.39.0


