From: Kevin Wolf <kwolf@redhat.com>
To: qemu-block@nongnu.org
Cc: kwolf@redhat.com, peter.maydell@linaro.org, qemu-devel@nongnu.org
Subject: [PULL 24/29] scsi: protect req->aiocb with AioContext lock
Date: Thu, 23 Feb 2023 19:51:41 +0100
Message-ID: <20230223185146.306454-25-kwolf@redhat.com>
In-Reply-To: <20230223185146.306454-1-kwolf@redhat.com>
From: Stefan Hajnoczi <stefanha@redhat.com>
If requests are being processed in the IOThread when a SCSIDevice is
unplugged, scsi_device_purge_requests() -> scsi_req_cancel_async() races
with I/O completion callbacks. Both threads load and store req->aiocb.
This can lead to assert(r->req.aiocb == NULL) failures and undefined
behavior.
Protect r->req.aiocb with the AioContext lock to prevent the race.
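
As a standalone illustration (not QEMU code; request_t, ctx_lock,
completion_cb and cancel_path are made-up names), the sketch below
models the same idea with plain pthreads: a completion callback and a
cancel path both touch one shared aiocb pointer, so both take the same
lock before doing so, just as this patch makes the SCSI completion
callbacks take the AioContext lock before loading or clearing
req->aiocb.

/* Minimal pthread model of the race fixed by this patch. */
#include <assert.h>
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

typedef struct {
    pthread_mutex_t ctx_lock;   /* stands in for the AioContext lock */
    void *aiocb;                /* stands in for req->aiocb */
} request_t;

static int in_flight_marker;    /* dummy object aiocb points at */

/* Completion callback: clear aiocb only while holding the lock. */
static void *completion_cb(void *opaque)
{
    request_t *r = opaque;

    pthread_mutex_lock(&r->ctx_lock);
    assert(r->aiocb != NULL);
    r->aiocb = NULL;
    /* ... complete the request ... */
    pthread_mutex_unlock(&r->ctx_lock);
    return NULL;
}

/* Cancel path (device unplug): read aiocb under the same lock. */
static void *cancel_path(void *opaque)
{
    request_t *r = opaque;

    pthread_mutex_lock(&r->ctx_lock);
    if (r->aiocb != NULL) {
        /* ... request still in flight, cancel it ... */
    }
    pthread_mutex_unlock(&r->ctx_lock);
    return NULL;
}

int main(void)
{
    request_t r = { PTHREAD_MUTEX_INITIALIZER, &in_flight_marker };
    pthread_t t1, t2;

    pthread_create(&t1, NULL, cancel_path, &r);
    pthread_create(&t2, NULL, completion_cb, &r);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    printf("aiocb after completion: %p\n", r.aiocb);
    return 0;
}

In QEMU the lock is per AioContext rather than per request, but the
load/store discipline on req->aiocb is the same.
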
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20230221212218.1378734-2-stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
hw/scsi/scsi-disk.c | 23 ++++++++++++++++-------
hw/scsi/scsi-generic.c | 11 ++++++-----
2 files changed, 22 insertions(+), 12 deletions(-)
diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
index d4e360850f..115584f8b9 100644
--- a/hw/scsi/scsi-disk.c
+++ b/hw/scsi/scsi-disk.c
@@ -273,9 +273,11 @@ static void scsi_aio_complete(void *opaque, int ret)
     SCSIDiskReq *r = (SCSIDiskReq *)opaque;
     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
 
+    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
+
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
-    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
+
     if (scsi_disk_req_check_error(r, ret, true)) {
         goto done;
     }
@@ -357,10 +359,11 @@ static void scsi_dma_complete(void *opaque, int ret)
     SCSIDiskReq *r = (SCSIDiskReq *)opaque;
     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
 
+    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
+
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
-    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
     if (ret < 0) {
         block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
     } else {
@@ -393,10 +396,11 @@ static void scsi_read_complete(void *opaque, int ret)
     SCSIDiskReq *r = (SCSIDiskReq *)opaque;
     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
 
+    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
+
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
-    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
     if (ret < 0) {
         block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
     } else {
@@ -446,10 +450,11 @@ static void scsi_do_read_cb(void *opaque, int ret)
     SCSIDiskReq *r = (SCSIDiskReq *)opaque;
     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
 
+    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
+
     assert (r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
-    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
     if (ret < 0) {
         block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
     } else {
@@ -530,10 +535,11 @@ static void scsi_write_complete(void * opaque, int ret)
     SCSIDiskReq *r = (SCSIDiskReq *)opaque;
     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
 
+    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
+
     assert (r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
-    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
     if (ret < 0) {
         block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
     } else {
@@ -1737,10 +1743,11 @@ static void scsi_unmap_complete(void *opaque, int ret)
     SCSIDiskReq *r = data->r;
     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
 
+    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
+
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
-    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
     if (scsi_disk_req_check_error(r, ret, true)) {
         scsi_req_unref(&r->req);
         g_free(data);
@@ -1816,9 +1823,11 @@ static void scsi_write_same_complete(void *opaque, int ret)
     SCSIDiskReq *r = data->r;
     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
 
+    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
+
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
-    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
+
     if (scsi_disk_req_check_error(r, ret, true)) {
         goto done;
     }
diff --git a/hw/scsi/scsi-generic.c b/hw/scsi/scsi-generic.c
index 92cce20a4d..ac9fa662b4 100644
--- a/hw/scsi/scsi-generic.c
+++ b/hw/scsi/scsi-generic.c
@@ -111,10 +111,11 @@ static void scsi_command_complete(void *opaque, int ret)
     SCSIGenericReq *r = (SCSIGenericReq *)opaque;
     SCSIDevice *s = r->req.dev;
 
+    aio_context_acquire(blk_get_aio_context(s->conf.blk));
+
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
-    aio_context_acquire(blk_get_aio_context(s->conf.blk));
     scsi_command_complete_noio(r, ret);
     aio_context_release(blk_get_aio_context(s->conf.blk));
 }
@@ -269,11 +270,11 @@ static void scsi_read_complete(void * opaque, int ret)
     SCSIDevice *s = r->req.dev;
     int len;
 
+    aio_context_acquire(blk_get_aio_context(s->conf.blk));
+
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
-    aio_context_acquire(blk_get_aio_context(s->conf.blk));
-
     if (ret || r->req.io_canceled) {
         scsi_command_complete_noio(r, ret);
         goto done;
@@ -386,11 +387,11 @@ static void scsi_write_complete(void * opaque, int ret)
 
     trace_scsi_generic_write_complete(ret);
 
+    aio_context_acquire(blk_get_aio_context(s->conf.blk));
+
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
-    aio_context_acquire(blk_get_aio_context(s->conf.blk));
-
     if (ret || r->req.io_canceled) {
         scsi_command_complete_noio(r, ret);
         goto done;
--
2.39.2