From: Stefan Hajnoczi <stefanha@redhat.com>
To: qemu-devel@nongnu.org
Cc: "Hanna Reitz" <hreitz@redhat.com>, "Paul Durrant" <paul@xen.org>,
"Paolo Bonzini" <pbonzini@redhat.com>,
"Alberto Garcia" <berto@igalia.com>,
"Emanuele Giuseppe Esposito" <eesposit@redhat.com>,
"John Snow" <jsnow@redhat.com>, "Kevin Wolf" <kwolf@redhat.com>,
"Eric Blake" <eblake@redhat.com>,
"Wen Congyang" <wencongyang2@huawei.com>,
qemu-block@nongnu.org, xen-devel@lists.xenproject.org,
"Coiby Xu" <Coiby.Xu@gmail.com>,
"Stefan Hajnoczi" <stefanha@redhat.com>,
"Eduardo Habkost" <eduardo@habkost.net>,
"Xie Changlong" <xiechanglong.d@gmail.com>,
"Ari Sundholm" <ari@tuxera.com>,
"Li Zhijian" <lizhijian@fujitsu.com>,
"Cleber Rosa" <crosa@redhat.com>,
"Juan Quintela" <quintela@redhat.com>,
"Michael S. Tsirkin" <mst@redhat.com>,
"Daniel P. Berrangé" <berrange@redhat.com>,
"Jason Wang" <jasowang@redhat.com>,
"Vladimir Sementsov-Ogievskiy" <vsementsov@yandex-team.ru>,
"Zhang Chen" <chen.zhang@intel.com>,
"Peter Xu" <peterx@redhat.com>,
"Anthony Perard" <anthony.perard@citrix.com>,
"Stefano Stabellini" <sstabellini@kernel.org>,
"Leonardo Bras" <leobras@redhat.com>,
"Pavel Dovgalyuk" <pavel.dovgaluk@ispras.ru>,
"Fam Zheng" <fam@euphon.net>, "Fabiano Rosas" <farosas@suse.de>
Subject: [PATCH 01/12] virtio-scsi: replace AioContext lock with tmf_bh_lock
Date: Wed, 29 Nov 2023 14:55:42 -0500
Message-ID: <20231129195553.942921-2-stefanha@redhat.com>
In-Reply-To: <20231129195553.942921-1-stefanha@redhat.com>

Protect the Task Management Function BH state with a lock. The TMF BH
runs in the main loop thread, but an IOThread might process a TMF at the
same time as the TMF BH is running. Therefore tmf_bh_list and tmf_bh
must be protected by a lock.

Run TMF request completion in the IOThread using aio_wait_bh_oneshot().
This avoids the need for additional locking to protect the virtqueue and
SCSI layer state.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
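
Note below the scissor line (not part of the commit message): the following
is a minimal sketch of the deferral/completion pattern this patch adopts.
MyDev and MyReq are hypothetical stand-ins for VirtIOSCSI and VirtIOSCSIReq,
and defer_to_main_loop()/process_deferred_bh() are illustrative names, not
code from this patch:

    #include "qemu/osdep.h"
    #include "qemu/lockable.h"  /* WITH_QEMU_LOCK_GUARD() */
    #include "qemu/main-loop.h" /* qemu_bh_new(), qemu_get_aio_context() */
    #include "qemu/queue.h"     /* QTAILQ_*() */
    #include "block/aio-wait.h" /* aio_wait_bh_oneshot() */

    typedef struct MyReq {
        struct MyDev *dev;
        QTAILQ_ENTRY(MyReq) next;
    } MyReq;

    typedef struct MyDev {
        AioContext *ctx;   /* IOThread AioContext, or NULL */
        QemuMutex lock;    /* protects bh and bh_list, like tmf_bh_lock */
        QEMUBH *bh;
        QTAILQ_HEAD(, MyReq) bh_list;
    } MyDev;

    static void complete_req(MyReq *req)
    {
        /* Touches virtqueue state, so it must only run in req->dev->ctx */
    }

    static void complete_req_bh(void *opaque)
    {
        complete_req(opaque);
    }

    /* Main loop BH: drain the list under the lock, then process requests */
    static void process_deferred_bh(void *opaque)
    {
        MyDev *s = opaque;
        QTAILQ_HEAD(, MyReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);
        MyReq *req, *tmp;

        /* Hold the lock only for bh/bh_list, never while completing */
        WITH_QEMU_LOCK_GUARD(&s->lock) {
            QTAILQ_FOREACH_SAFE(req, &s->bh_list, next, tmp) {
                QTAILQ_REMOVE(&s->bh_list, req, next);
                QTAILQ_INSERT_TAIL(&reqs, req, next);
            }
            qemu_bh_delete(s->bh);
            s->bh = NULL;
        }

        QTAILQ_FOREACH_SAFE(req, &reqs, next, tmp) {
            QTAILQ_REMOVE(&reqs, req, next);

            /* ... main loop processing of the request goes here ... */

            if (!s->ctx || s->ctx == qemu_get_aio_context()) {
                complete_req(req); /* no IOThread, complete directly */
            } else {
                /* Run completion in the IOThread and wait for it */
                aio_wait_bh_oneshot(s->ctx, complete_req_bh, req);
            }
        }
    }

    /* Called from the IOThread: defer a request to the main loop */
    static void defer_to_main_loop(MyDev *s, MyReq *req)
    {
        WITH_QEMU_LOCK_GUARD(&s->lock) {
            QTAILQ_INSERT_TAIL(&s->bh_list, req, next);
            if (!s->bh) {
                /* qemu_bh_new() BHs run in the main loop thread */
                s->bh = qemu_bh_new(process_deferred_bh, s);
                qemu_bh_schedule(s->bh);
            }
        }
    }

The key design choice is that the new lock covers only the BH pointer and
the request list; the virtqueue and SCSI layer state need no extra lock
because they are only ever touched from s->ctx.
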
include/hw/virtio/virtio-scsi.h | 3 +-
hw/scsi/virtio-scsi.c | 62 ++++++++++++++++++++++-----------
2 files changed, 43 insertions(+), 22 deletions(-)

diff --git a/include/hw/virtio/virtio-scsi.h b/include/hw/virtio/virtio-scsi.h
index 779568ab5d..da8cb928d9 100644
--- a/include/hw/virtio/virtio-scsi.h
+++ b/include/hw/virtio/virtio-scsi.h
@@ -85,8 +85,9 @@ struct VirtIOSCSI {
/*
* TMFs deferred to main loop BH. These fields are protected by
- * virtio_scsi_acquire().
+ * tmf_bh_lock.
*/
+ QemuMutex tmf_bh_lock;
QEMUBH *tmf_bh;
QTAILQ_HEAD(, VirtIOSCSIReq) tmf_bh_list;
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index 9c751bf296..4f8d35facc 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -123,6 +123,30 @@ static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
virtio_scsi_free_req(req);
}
+static void virtio_scsi_complete_req_bh(void *opaque)
+{
+ VirtIOSCSIReq *req = opaque;
+
+ virtio_scsi_complete_req(req);
+}
+
+/*
+ * Called from virtio_scsi_do_one_tmf_bh() in main loop thread. The main loop
+ * thread cannot touch the virtqueue since that could race with an IOThread.
+ */
+static void virtio_scsi_complete_req_from_main_loop(VirtIOSCSIReq *req)
+{
+ VirtIOSCSI *s = req->dev;
+
+ if (!s->ctx || s->ctx == qemu_get_aio_context()) {
+ /* No need to schedule a BH when there is no IOThread */
+ virtio_scsi_complete_req(req);
+ } else {
+ /* Run request completion in the IOThread */
+ aio_wait_bh_oneshot(s->ctx, virtio_scsi_complete_req_bh, req);
+ }
+}
+
static void virtio_scsi_bad_req(VirtIOSCSIReq *req)
{
virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers");
@@ -338,10 +362,7 @@ static void virtio_scsi_do_one_tmf_bh(VirtIOSCSIReq *req)
out:
object_unref(OBJECT(d));
-
- virtio_scsi_acquire(s);
- virtio_scsi_complete_req(req);
- virtio_scsi_release(s);
+ virtio_scsi_complete_req_from_main_loop(req);
}
/* Some TMFs must be processed from the main loop thread */
@@ -354,18 +375,16 @@ static void virtio_scsi_do_tmf_bh(void *opaque)
GLOBAL_STATE_CODE();
- virtio_scsi_acquire(s);
+ WITH_QEMU_LOCK_GUARD(&s->tmf_bh_lock) {
+ QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) {
+ QTAILQ_REMOVE(&s->tmf_bh_list, req, next);
+ QTAILQ_INSERT_TAIL(&reqs, req, next);
+ }
- QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) {
- QTAILQ_REMOVE(&s->tmf_bh_list, req, next);
- QTAILQ_INSERT_TAIL(&reqs, req, next);
+ qemu_bh_delete(s->tmf_bh);
+ s->tmf_bh = NULL;
}
- qemu_bh_delete(s->tmf_bh);
- s->tmf_bh = NULL;
-
- virtio_scsi_release(s);
-
QTAILQ_FOREACH_SAFE(req, &reqs, next, tmp) {
QTAILQ_REMOVE(&reqs, req, next);
virtio_scsi_do_one_tmf_bh(req);
@@ -379,8 +398,7 @@ static void virtio_scsi_reset_tmf_bh(VirtIOSCSI *s)
GLOBAL_STATE_CODE();
- virtio_scsi_acquire(s);
-
+ /* Called after ioeventfd has been stopped, so tmf_bh_lock is not needed */
if (s->tmf_bh) {
qemu_bh_delete(s->tmf_bh);
s->tmf_bh = NULL;
@@ -393,19 +411,19 @@ static void virtio_scsi_reset_tmf_bh(VirtIOSCSI *s)
req->resp.tmf.response = VIRTIO_SCSI_S_TARGET_FAILURE;
virtio_scsi_complete_req(req);
}
-
- virtio_scsi_release(s);
}
static void virtio_scsi_defer_tmf_to_bh(VirtIOSCSIReq *req)
{
VirtIOSCSI *s = req->dev;
- QTAILQ_INSERT_TAIL(&s->tmf_bh_list, req, next);
+ WITH_QEMU_LOCK_GUARD(&s->tmf_bh_lock) {
+ QTAILQ_INSERT_TAIL(&s->tmf_bh_list, req, next);
- if (!s->tmf_bh) {
- s->tmf_bh = qemu_bh_new(virtio_scsi_do_tmf_bh, s);
- qemu_bh_schedule(s->tmf_bh);
+ if (!s->tmf_bh) {
+ s->tmf_bh = qemu_bh_new(virtio_scsi_do_tmf_bh, s);
+ qemu_bh_schedule(s->tmf_bh);
+ }
}
}
@@ -1235,6 +1253,7 @@ static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
Error *err = NULL;
QTAILQ_INIT(&s->tmf_bh_list);
+ qemu_mutex_init(&s->tmf_bh_lock);
virtio_scsi_common_realize(dev,
virtio_scsi_handle_ctrl,
@@ -1277,6 +1296,7 @@ static void virtio_scsi_device_unrealize(DeviceState *dev)
qbus_set_hotplug_handler(BUS(&s->bus), NULL);
virtio_scsi_common_unrealize(dev);
+ qemu_mutex_destroy(&s->tmf_bh_lock);
}
static Property virtio_scsi_properties[] = {
--
2.42.0