From: Kevin Wolf <kwolf@redhat.com>
To: qemu-block@nongnu.org
Cc: kwolf@redhat.com, stefanha@redhat.com, qemu-devel@nongnu.org
Subject: [PULL 13/33] scsi: don't lock AioContext in I/O code path
Date: Thu, 21 Dec 2023 22:23:18 +0100
Message-ID: <20231221212339.164439-14-kwolf@redhat.com>
In-Reply-To: <20231221212339.164439-1-kwolf@redhat.com>
From: Stefan Hajnoczi <stefanha@redhat.com>
blk_aio_*() no longer requires the AioContext lock, and neither does the
SCSI subsystem's internal state.
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Acked-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20231204164259.1515217-4-stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 hw/scsi/scsi-disk.c    | 23 -----------------------
 hw/scsi/scsi-generic.c | 20 +++-----------------
 2 files changed, 3 insertions(+), 40 deletions(-)
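
A rough standalone sketch of the invariant this change relies on (plain C
and pthreads rather than QEMU APIs; the Request type and complete()
callback are hypothetical stand-ins): once a completion callback is
guaranteed to run in the thread that owns the request state, a lock around
the state update is redundant, and an assertion on the current thread can
document the rule instead.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

typedef struct {
    pthread_t owner;    /* the only thread allowed to touch this state */
    int in_flight;
} Request;

/* Completion callback: relies on thread affinity instead of a mutex.
 * The assertion enforces the invariant that taking a lock around the
 * body used to paper over. */
static void complete(Request *req, int ret)
{
    assert(pthread_equal(pthread_self(), req->owner));
    req->in_flight = 0;
    printf("request completed, ret=%d\n", ret);
}

int main(void)
{
    Request req = { .owner = pthread_self(), .in_flight = 1 };
    complete(&req, 0);    /* same thread, so the assertion holds */
    return 0;
}

In the patch itself the owning context is the BlockBackend's AioContext
rather than a pthread_t, but the shape of the reasoning is the same.
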
diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
index 6691f5edb8..2c1bbb3530 100644
--- a/hw/scsi/scsi-disk.c
+++ b/hw/scsi/scsi-disk.c
@@ -273,8 +273,6 @@ static void scsi_aio_complete(void *opaque, int ret)
     SCSIDiskReq *r = (SCSIDiskReq *)opaque;
     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
 
-    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
-
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
@@ -286,7 +284,6 @@ static void scsi_aio_complete(void *opaque, int ret)
     scsi_req_complete(&r->req, GOOD);
 
 done:
-    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
     scsi_req_unref(&r->req);
 }
 
@@ -394,8 +391,6 @@ static void scsi_read_complete(void *opaque, int ret)
     SCSIDiskReq *r = (SCSIDiskReq *)opaque;
     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
 
-    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
-
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
@@ -406,7 +401,6 @@ static void scsi_read_complete(void *opaque, int ret)
         trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
     }
     scsi_read_complete_noio(r, ret);
-    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
 }
 
 /* Actually issue a read to the block device.  */
@@ -448,8 +442,6 @@ static void scsi_do_read_cb(void *opaque, int ret)
     SCSIDiskReq *r = (SCSIDiskReq *)opaque;
     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
 
-    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
-
     assert (r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
@@ -459,7 +451,6 @@ static void scsi_do_read_cb(void *opaque, int ret)
         block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
     }
     scsi_do_read(opaque, ret);
-    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
 }
 
 /* Read more data from scsi device into buffer.  */
@@ -533,8 +524,6 @@ static void scsi_write_complete(void * opaque, int ret)
     SCSIDiskReq *r = (SCSIDiskReq *)opaque;
     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
 
-    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
-
     assert (r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
@@ -544,7 +533,6 @@ static void scsi_write_complete(void * opaque, int ret)
         block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
     }
     scsi_write_complete_noio(r, ret);
-    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
 }
 
 static void scsi_write_data(SCSIRequest *req)
@@ -1742,8 +1730,6 @@ static void scsi_unmap_complete(void *opaque, int ret)
     SCSIDiskReq *r = data->r;
     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
 
-    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
-
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
@@ -1754,7 +1740,6 @@ static void scsi_unmap_complete(void *opaque, int ret)
         block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
         scsi_unmap_complete_noio(data, ret);
     }
-    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
 }
 
 static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
@@ -1822,8 +1807,6 @@ static void scsi_write_same_complete(void *opaque, int ret)
     SCSIDiskReq *r = data->r;
     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
 
-    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
-
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
@@ -1847,7 +1830,6 @@ static void scsi_write_same_complete(void *opaque, int ret)
                                        data->sector << BDRV_SECTOR_BITS,
                                        &data->qiov, 0,
                                        scsi_write_same_complete, data);
-        aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
         return;
     }
 
@@ -1857,7 +1839,6 @@ done:
     scsi_req_unref(&r->req);
     qemu_vfree(data->iov.iov_base);
     g_free(data);
-    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
 }
 
 static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
@@ -2810,7 +2791,6 @@ static void scsi_block_sgio_complete(void *opaque, int ret)
 {
     SCSIBlockReq *req = (SCSIBlockReq *)opaque;
     SCSIDiskReq *r = &req->req;
-    SCSIDevice *s = r->req.dev;
     sg_io_hdr_t *io_hdr = &req->io_header;
 
     if (ret == 0) {
@@ -2827,13 +2807,10 @@ static void scsi_block_sgio_complete(void *opaque, int ret)
     }
 
     if (ret > 0) {
-        aio_context_acquire(blk_get_aio_context(s->conf.blk));
         if (scsi_handle_rw_error(r, ret, true)) {
-            aio_context_release(blk_get_aio_context(s->conf.blk));
             scsi_req_unref(&r->req);
             return;
         }
-        aio_context_release(blk_get_aio_context(s->conf.blk));
 
         /* Ignore error.  */
         ret = 0;
diff --git a/hw/scsi/scsi-generic.c b/hw/scsi/scsi-generic.c
index 2417f0ad84..b7b04e1d63 100644
--- a/hw/scsi/scsi-generic.c
+++ b/hw/scsi/scsi-generic.c
@@ -109,15 +109,11 @@ done:
 static void scsi_command_complete(void *opaque, int ret)
 {
     SCSIGenericReq *r = (SCSIGenericReq *)opaque;
-    SCSIDevice *s = r->req.dev;
-
-    aio_context_acquire(blk_get_aio_context(s->conf.blk));
 
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
     scsi_command_complete_noio(r, ret);
-    aio_context_release(blk_get_aio_context(s->conf.blk));
 }
 
 static int execute_command(BlockBackend *blk,
@@ -274,14 +270,12 @@ static void scsi_read_complete(void * opaque, int ret)
     SCSIDevice *s = r->req.dev;
     int len;
 
-    aio_context_acquire(blk_get_aio_context(s->conf.blk));
-
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
     if (ret || r->req.io_canceled) {
         scsi_command_complete_noio(r, ret);
-        goto done;
+        return;
     }
 
     len = r->io_header.dxfer_len - r->io_header.resid;
@@ -320,7 +314,7 @@ static void scsi_read_complete(void * opaque, int ret)
         r->io_header.status != GOOD ||
         len == 0) {
         scsi_command_complete_noio(r, 0);
-        goto done;
+        return;
     }
 
     /* Snoop READ CAPACITY output to set the blocksize.  */
@@ -356,9 +350,6 @@ static void scsi_read_complete(void * opaque, int ret)
 req_complete:
     scsi_req_data(&r->req, len);
     scsi_req_unref(&r->req);
-
-done:
-    aio_context_release(blk_get_aio_context(s->conf.blk));
 }
 
 /* Read more data from scsi device into buffer.  */
@@ -391,14 +382,12 @@ static void scsi_write_complete(void * opaque, int ret)
 
     trace_scsi_generic_write_complete(ret);
 
-    aio_context_acquire(blk_get_aio_context(s->conf.blk));
-
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
     if (ret || r->req.io_canceled) {
         scsi_command_complete_noio(r, ret);
-        goto done;
+        return;
     }
 
     if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
@@ -408,9 +397,6 @@ static void scsi_write_complete(void * opaque, int ret)
     }
 
     scsi_command_complete_noio(r, ret);
-
-done:
-    aio_context_release(blk_get_aio_context(s->conf.blk));
 }
 
 /* Write data to a scsi device.  Returns nonzero on failure.
--
2.43.0