From: Jason Andryuk <jandryuk@gmail.com>
To: qemu-devel@nongnu.org
Cc: Jason Andryuk <jandryuk@gmail.com>, Greg Kurz <groug@kaod.org>,
Christian Schoenebeck <qemu_oss@crudebyte.com>,
Stefano Stabellini <sstabellini@kernel.org>,
Anthony Perard <anthony.perard@citrix.com>,
Paul Durrant <paul@xen.org>,
xen-devel@lists.xenproject.org (open list:X86 Xen CPUs)
Subject: [PATCH] 9pfs/xen: Fix segfault on shutdown
Date: Tue, 2 May 2023 10:37:22 -0400
Message-ID: <20230502143722.15613-1-jandryuk@gmail.com>

xen_9pfs_free() can't use gnttabdev, since it has already been closed and
NULL-ed out by the time free is called. Do the teardown in _disconnect()
instead, which matches the setup done in _connect().

Trace events are also added for the XenDevOps functions.
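
Once built, the new trace points should be usable like any other QEMU
trace event, e.g. by appending the following to the QEMU command line
(exact usage depends on the configured trace backend):

    -trace 'xen_9pfs_*'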
Signed-off-by: Jason Andryuk <jandryuk@gmail.com>
---
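For reviewers, a rough sketch of the teardown ordering that triggers the
segfault. This is paraphrased and illustrative rather than verbatim QEMU
code (the helper name and call sites are from memory), but it shows why
the grant unmapping has to happen in the disconnect op:

    /* Illustrative sketch of the legacy Xen backend teardown (not verbatim). */
    static void xen_pv_del_xendev(struct XenLegacyDevice *xendev)
    {
        if (xendev->ops->disconnect) {
            xendev->ops->disconnect(xendev);      /* grant maps still valid */
        }
        qemu_xen_gnttab_close(xendev->gnttabdev); /* handle closed here...  */
        xendev->gnttabdev = NULL;                 /* ...and NULL-ed out     */
        if (xendev->ops->free) {
            xendev->ops->free(xendev);            /* unmapping grants here
                                                   * dereferences NULL      */
        }
    }

Since _disconnect() now NULLs each field after releasing it and checks
for NULL before each teardown step, it remains safe if it runs again
before free.
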
hw/9pfs/trace-events | 5 +++++
hw/9pfs/xen-9p-backend.c | 36 +++++++++++++++++++++++-------------
2 files changed, 28 insertions(+), 13 deletions(-)
diff --git a/hw/9pfs/trace-events b/hw/9pfs/trace-events
index 6c77966c0b..7b5b0b5a48 100644
--- a/hw/9pfs/trace-events
+++ b/hw/9pfs/trace-events
@@ -48,3 +48,8 @@ v9fs_readlink(uint16_t tag, uint8_t id, int32_t fid) "tag %d id %d fid %d"
v9fs_readlink_return(uint16_t tag, uint8_t id, char* target) "tag %d id %d name %s"
v9fs_setattr(uint16_t tag, uint8_t id, int32_t fid, int32_t valid, int32_t mode, int32_t uid, int32_t gid, int64_t size, int64_t atime_sec, int64_t mtime_sec) "tag %u id %u fid %d iattr={valid %d mode %d uid %d gid %d size %"PRId64" atime=%"PRId64" mtime=%"PRId64" }"
v9fs_setattr_return(uint16_t tag, uint8_t id) "tag %u id %u"
+
+xen_9pfs_alloc(char *name) "name %s"
+xen_9pfs_connect(char *name) "name %s"
+xen_9pfs_disconnect(char *name) "name %s"
+xen_9pfs_free(char *name) "name %s"
diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
index 0e266c552b..c646a0b3d1 100644
--- a/hw/9pfs/xen-9p-backend.c
+++ b/hw/9pfs/xen-9p-backend.c
@@ -25,6 +25,8 @@
#include "qemu/iov.h"
#include "fsdev/qemu-fsdev.h"
+#include "trace.h"
+
#define VERSIONS "1"
#define MAX_RINGS 8
#define MAX_RING_ORDER 9
@@ -337,6 +339,8 @@ static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev)
Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
int i;
+ trace_xen_9pfs_disconnect(xendev->name);
+
for (i = 0; i < xen_9pdev->num_rings; i++) {
if (xen_9pdev->rings[i].evtchndev != NULL) {
qemu_set_fd_handler(qemu_xen_evtchn_fd(xen_9pdev->rings[i].evtchndev),
@@ -345,40 +349,42 @@ static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev)
xen_9pdev->rings[i].local_port);
xen_9pdev->rings[i].evtchndev = NULL;
}
- }
-}
-
-static int xen_9pfs_free(struct XenLegacyDevice *xendev)
-{
- Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
- int i;
-
- if (xen_9pdev->rings[0].evtchndev != NULL) {
- xen_9pfs_disconnect(xendev);
- }
-
- for (i = 0; i < xen_9pdev->num_rings; i++) {
if (xen_9pdev->rings[i].data != NULL) {
xen_be_unmap_grant_refs(&xen_9pdev->xendev,
xen_9pdev->rings[i].data,
xen_9pdev->rings[i].intf->ref,
(1 << xen_9pdev->rings[i].ring_order));
+ xen_9pdev->rings[i].data = NULL;
}
if (xen_9pdev->rings[i].intf != NULL) {
xen_be_unmap_grant_ref(&xen_9pdev->xendev,
xen_9pdev->rings[i].intf,
xen_9pdev->rings[i].ref);
+ xen_9pdev->rings[i].intf = NULL;
}
if (xen_9pdev->rings[i].bh != NULL) {
qemu_bh_delete(xen_9pdev->rings[i].bh);
+ xen_9pdev->rings[i].bh = NULL;
}
}
g_free(xen_9pdev->id);
+ xen_9pdev->id = NULL;
g_free(xen_9pdev->tag);
+ xen_9pdev->tag = NULL;
g_free(xen_9pdev->path);
+ xen_9pdev->path = NULL;
g_free(xen_9pdev->security_model);
+ xen_9pdev->security_model = NULL;
g_free(xen_9pdev->rings);
+ xen_9pdev->rings = NULL;
+ return;
+}
+
+static int xen_9pfs_free(struct XenLegacyDevice *xendev)
+{
+ trace_xen_9pfs_free(xendev->name);
+
return 0;
}
@@ -390,6 +396,8 @@ static int xen_9pfs_connect(struct XenLegacyDevice *xendev)
V9fsState *s = &xen_9pdev->state;
QemuOpts *fsdev;
+ trace_xen_9pfs_connect(xendev->name);
+
if (xenstore_read_fe_int(&xen_9pdev->xendev, "num-rings",
&xen_9pdev->num_rings) == -1 ||
xen_9pdev->num_rings > MAX_RINGS || xen_9pdev->num_rings < 1) {
@@ -499,6 +507,8 @@ out:
static void xen_9pfs_alloc(struct XenLegacyDevice *xendev)
{
+ trace_xen_9pfs_alloc(xendev->name);
+
xenstore_write_be_str(xendev, "versions", VERSIONS);
xenstore_write_be_int(xendev, "max-rings", MAX_RINGS);
xenstore_write_be_int(xendev, "max-ring-page-order", MAX_RING_ORDER);
--
2.40.1