From: David Woodhouse <dwmw2@infradead.org>
To: qemu-devel@nongnu.org
Cc: "Kevin Wolf" <kwolf@redhat.com>,
	"Hanna Reitz" <hreitz@redhat.com>,
	"Stefano Stabellini" <sstabellini@kernel.org>,
	"Anthony Perard" <anthony.perard@citrix.com>,
	"Paul Durrant" <paul@xen.org>,
	"Marc-André Lureau" <marcandre.lureau@redhat.com>,
	"Paolo Bonzini" <pbonzini@redhat.com>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	"Marcel Apfelbaum" <marcel.apfelbaum@gmail.com>,
	"Richard Henderson" <richard.henderson@linaro.org>,
	"Eduardo Habkost" <eduardo@habkost.net>,
	"David Woodhouse" <dwmw2@infradead.org>,
	"Marcelo Tosatti" <mtosatti@redhat.com>,
	qemu-block@nongnu.org, xen-devel@lists.xenproject.org,
	kvm@vger.kernel.org
Subject: [PATCH 09/12] hw/xen: prevent duplicate device registrations
Date: Mon, 16 Oct 2023 16:19:06 +0100	[thread overview]
Message-ID: <20231016151909.22133-10-dwmw2@infradead.org> (raw)
In-Reply-To: <20231016151909.22133-1-dwmw2@infradead.org>

From: David Woodhouse <dwmw@amazon.co.uk>

Ensure that we have a XenBackendInstance for every device regardless
of whether it was "discovered" in XenStore or created directly in QEMU.

This allows the backend_list to be a source of truth about whether a
given backend exists, and allows us to reject duplicates.
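
For illustration only (this is not the actual xen-bus.c enumeration code,
and the helper name is invented), the XenStore discovery path can now ask
the list before instantiating anything, so a backend that the user already
created directly with -device is simply skipped:

  static void try_create_backend(XenBus *xenbus, const char *type,
                                 const char *name, QDict *opts,
                                 Error **errp)
  {
      /* Skip anything already tracked in backend_list. */
      if (xen_backend_exists(type, name)) {
          return;
      }

      /* Otherwise register a new XenBackendInstance and let the
       * backend's ->create() hook realize the XenDevice. */
      xen_backend_device_create(xenbus, type, name, opts, errp);
  }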

This also cleans up the pattern where backend drivers called
xen_backend_set_device() on a XenDevice immediately after calling
qdev_realize_and_unref() on it, at a point where the device was no
longer theirs to play with.
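
As a rough sketch of what a backend's ->create() hook can look like after
this change (the "example-xendev" qdev type is invented for illustration):
the XenBackendInstance is already on backend_list before ->create() runs,
and the XenDevice is attached to it from xen_device_realize() rather than
by the driver:

  static void example_device_create(XenBackendInstance *backend,
                                    QDict *opts, Error **errp)
  {
      XenBus *xenbus = xen_backend_get_bus(backend);
      DeviceState *dev = qdev_new("example-xendev");

      /* Realize the device. On success it is no longer ours to touch;
       * in particular, there is no xen_backend_set_device() call here
       * any more. */
      qdev_realize_and_unref(dev, BUS(xenbus), errp);
  }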

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
---
 hw/block/xen-block.c         |  1 -
 hw/char/xen_console.c        |  2 +-
 hw/xen/xen-backend.c         | 78 ++++++++++++++++++++++++++----------
 hw/xen/xen-bus.c             |  8 ++++
 include/hw/xen/xen-backend.h |  3 ++
 5 files changed, 69 insertions(+), 23 deletions(-)

diff --git a/hw/block/xen-block.c b/hw/block/xen-block.c
index a07cd7eb5d..9262338535 100644
--- a/hw/block/xen-block.c
+++ b/hw/block/xen-block.c
@@ -975,7 +975,6 @@ static void xen_block_device_create(XenBackendInstance *backend,
         goto fail;
     }
 
-    xen_backend_set_device(backend, xendev);
     return;
 
 fail:
diff --git a/hw/char/xen_console.c b/hw/char/xen_console.c
index bd20be116c..2825b8c511 100644
--- a/hw/char/xen_console.c
+++ b/hw/char/xen_console.c
@@ -468,7 +468,7 @@ static void xen_console_device_create(XenBackendInstance *backend,
     Chardev *cd = NULL;
     struct qemu_xs_handle *xsh = xenbus->xsh;
 
-    if (qemu_strtoul(name, NULL, 10, &number)) {
+    if (qemu_strtoul(name, NULL, 10, &number) || number >= INT_MAX) {
         error_setg(errp, "failed to parse name '%s'", name);
         goto fail;
     }
diff --git a/hw/xen/xen-backend.c b/hw/xen/xen-backend.c
index b9bf70a9f5..dcb4329258 100644
--- a/hw/xen/xen-backend.c
+++ b/hw/xen/xen-backend.c
@@ -101,22 +101,28 @@ static XenBackendInstance *xen_backend_list_find(XenDevice *xendev)
     return NULL;
 }
 
-bool xen_backend_exists(const char *type, const char *name)
+static XenBackendInstance *xen_backend_lookup(const XenBackendImpl *impl, const char *name)
 {
-    const XenBackendImpl *impl = xen_backend_table_lookup(type);
     XenBackendInstance *backend;
 
-    if (!impl) {
-        return false;
-    }
-
     QLIST_FOREACH(backend, &backend_list, entry) {
         if (backend->impl == impl && !strcmp(backend->name, name)) {
-            return true;
+            return backend;
         }
     }
 
-    return false;
+    return NULL;
+}
+
+bool xen_backend_exists(const char *type, const char *name)
+{
+    const XenBackendImpl *impl = xen_backend_table_lookup(type);
+
+    if (!impl) {
+        return false;
+    }
+
+    return !!xen_backend_lookup(impl, name);
 }
 
 static void xen_backend_list_remove(XenBackendInstance *backend)
@@ -138,11 +144,10 @@ void xen_backend_device_create(XenBus *xenbus, const char *type,
     backend = g_new0(XenBackendInstance, 1);
     backend->xenbus = xenbus;
     backend->name = g_strdup(name);
-
-    impl->create(backend, opts, errp);
-
     backend->impl = impl;
     xen_backend_list_add(backend);
+
+    impl->create(backend, opts, errp);
 }
 
 XenBus *xen_backend_get_bus(XenBackendInstance *backend)
@@ -155,13 +160,6 @@ const char *xen_backend_get_name(XenBackendInstance *backend)
     return backend->name;
 }
 
-void xen_backend_set_device(XenBackendInstance *backend,
-                            XenDevice *xendev)
-{
-    g_assert(!backend->xendev);
-    backend->xendev = xendev;
-}
-
 XenDevice *xen_backend_get_device(XenBackendInstance *backend)
 {
     return backend->xendev;
@@ -178,9 +176,7 @@ bool xen_backend_try_device_destroy(XenDevice *xendev, Error **errp)
     }
 
     impl = backend->impl;
-    if (backend->xendev) {
-        impl->destroy(backend, errp);
-    }
+    impl->destroy(backend, errp);
 
     xen_backend_list_remove(backend);
     g_free(backend->name);
@@ -188,3 +184,43 @@ bool xen_backend_try_device_destroy(XenDevice *xendev, Error **errp)
 
     return true;
 }
+
+bool xen_backend_device_realized(XenDevice *xendev, Error **errp)
+{
+    XenDeviceClass *xendev_class = XEN_DEVICE_GET_CLASS(xendev);
+    const char *type = xendev_class->backend ? : object_get_typename(OBJECT(xendev));
+    const XenBackendImpl *impl = xen_backend_table_lookup(type);
+    XenBackendInstance *backend;
+
+    if (!impl) {
+        return false;
+    }
+
+    backend = xen_backend_lookup(impl, xendev->name);
+    if (backend) {
+        if (backend->xendev && backend->xendev != xendev) {
+            error_setg(errp, "device %s/%s already exists", type, xendev->name);
+            return false;
+        }
+        backend->xendev = xendev;
+        return true;
+    }
+
+    backend = g_new0(XenBackendInstance, 1);
+    backend->xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev)));
+    backend->xendev = xendev;
+    backend->name = g_strdup(xendev->name);
+    backend->impl = impl;
+
+    xen_backend_list_add(backend);
+    return true;
+}
+
+void xen_backend_device_unrealized(XenDevice *xendev)
+{
+    XenBackendInstance *backend = xen_backend_list_find(xendev);
+
+    if (backend) {
+        backend->xendev = NULL;
+    }
+}
diff --git a/hw/xen/xen-bus.c b/hw/xen/xen-bus.c
index 0da2aa219a..0b232d1f94 100644
--- a/hw/xen/xen-bus.c
+++ b/hw/xen/xen-bus.c
@@ -359,6 +359,8 @@ static void xen_bus_realize(BusState *bus, Error **errp)
 
     g_free(type);
     g_free(key);
+
+    xen_bus_enumerate(xenbus);
     return;
 
 fail:
@@ -958,6 +960,8 @@ static void xen_device_unrealize(DeviceState *dev)
 
     trace_xen_device_unrealize(type, xendev->name);
 
+    xen_backend_device_unrealized(xendev);
+
     if (xendev->exit.notify) {
         qemu_remove_exit_notifier(&xendev->exit);
         xendev->exit.notify = NULL;
@@ -1024,6 +1028,10 @@ static void xen_device_realize(DeviceState *dev, Error **errp)
         goto unrealize;
     }
 
+    if (!xen_backend_device_realized(xendev, errp)) {
+        goto unrealize;
+    }
+
     trace_xen_device_realize(type, xendev->name);
 
     xendev->xsh = qemu_xen_xs_open();
diff --git a/include/hw/xen/xen-backend.h b/include/hw/xen/xen-backend.h
index 0f01631ae7..3f1e764c51 100644
--- a/include/hw/xen/xen-backend.h
+++ b/include/hw/xen/xen-backend.h
@@ -38,4 +38,7 @@ void xen_backend_device_create(XenBus *xenbus, const char *type,
                                const char *name, QDict *opts, Error **errp);
 bool xen_backend_try_device_destroy(XenDevice *xendev, Error **errp);
 
+bool xen_backend_device_realized(XenDevice *xendev, Error **errp);
+void xen_backend_device_unrealized(XenDevice *xendev);
+
 #endif /* HW_XEN_BACKEND_H */
-- 
2.40.1


