From: Hanna Czenczek <hreitz@redhat.com>
To: qemu-block@nongnu.org
Cc: qemu-devel@nongnu.org, "Hanna Czenczek" <hreitz@redhat.com>,
"Kevin Wolf" <kwolf@redhat.com>,
"Stefan Hajnoczi" <stefanha@redhat.com>,
"Paolo Bonzini" <pbonzini@redhat.com>,
"Richard W . M . Jones" <rjones@redhat.com>,
"Ilya Dryomov" <idryomov@gmail.com>,
"Peter Lieven" <pl@dlhnet.de>,
"Philippe Mathieu-Daudé" <philmd@linaro.org>,
"Alex Bennée" <alex.bennee@linaro.org>,
"Fam Zheng" <fam@euphon.net>,
"Ronnie Sahlberg" <ronniesahlberg@gmail.com>
Subject: [PATCH v2 09/19] nvme: Note in which AioContext some functions run
Date: Mon, 10 Nov 2025 16:48:44 +0100
Message-ID: <20251110154854.151484-10-hreitz@redhat.com>
In-Reply-To: <20251110154854.151484-1-hreitz@redhat.com>
Sprinkle comments throughout block/nvme.c noting, for functions where
it may not be obvious, which AioContext they must be run in, or in
which AioContext they happen to run (for callbacks, BHs, and event
notifiers).
Suggested-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
---
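[Note for reviewers, not meant for git history: a minimal sketch of
what anchors the new comment on @completion_bh. The fragment below is
assumed setup code in the spirit of nvme_create_queue_pair(), not a
verbatim quote from block/nvme.c; "bs" and "q" stand for the
BlockDriverState and the queue pair being set up, while
bdrv_get_aio_context() and aio_bh_new() are the usual QEMU helpers.

    /*
     * Sketch: the BH is created in the BDS's main AioContext, so its
     * callback, nvme_process_completion_bh(), always runs there;
     * that is what the new comment on @completion_bh documents.
     */
    AioContext *aio_context = bdrv_get_aio_context(bs);
    q->completion_bh = aio_bh_new(aio_context,
                                  nvme_process_completion_bh, q);

The free_req_queue comments rest on the same kind of reasoning: per the
earlier coroutine-waking fixes in this series, waiters are resumed in
the AioContext they were running in, hence "woken in their own
context".]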
block/nvme.c | 43 ++++++++++++++++++++++++++++++++++---------
1 file changed, 34 insertions(+), 9 deletions(-)
diff --git a/block/nvme.c b/block/nvme.c
index b8262ebfd9..919e14cef9 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -65,6 +65,7 @@ typedef struct {
} NVMeQueue;
typedef struct {
+ /* Called from nvme_process_completion() in the BDS's main AioContext */
BlockCompletionFunc *cb;
void *opaque;
int cid;
@@ -84,6 +85,7 @@ typedef struct {
uint8_t *prp_list_pages;
/* Fields protected by @lock */
+ /* Coroutines in this queue are woken in their own context */
CoQueue free_req_queue;
NVMeQueue sq, cq;
int cq_phase;
@@ -92,7 +94,7 @@ typedef struct {
int need_kick;
int inflight;
- /* Thread-safe, no lock necessary */
+ /* Thread-safe, no lock necessary; runs in the BDS's main context */
QEMUBH *completion_bh;
} NVMeQueuePair;
@@ -206,11 +208,13 @@ static void nvme_free_queue_pair(NVMeQueuePair *q)
g_free(q);
}
+/* Runs in the BDS's main AioContext */
static void nvme_free_req_queue_cb(void *opaque)
{
NVMeQueuePair *q = opaque;
qemu_mutex_lock(&q->lock);
+ /* qemu_co_enter_next() wakes the coroutine in its own AioContext */
while (q->free_req_head != -1 &&
qemu_co_enter_next(&q->free_req_queue, &q->lock)) {
/* Retry waiting requests */
@@ -281,7 +285,7 @@ fail:
return NULL;
}
-/* With q->lock */
+/* With q->lock, must be run in the BDS's main AioContext */
static void nvme_kick(NVMeQueuePair *q)
{
BDRVNVMeState *s = q->s;
@@ -308,7 +312,10 @@ static NVMeRequest *nvme_get_free_req_nofail_locked(NVMeQueuePair *q)
return req;
}
-/* Return a free request element if any, otherwise return NULL. */
+/*
+ * Return a free request element if any, otherwise return NULL.
+ * May be run from any AioContext.
+ */
static NVMeRequest *nvme_get_free_req_nowait(NVMeQueuePair *q)
{
QEMU_LOCK_GUARD(&q->lock);
@@ -321,6 +328,7 @@ static NVMeRequest *nvme_get_free_req_nowait(NVMeQueuePair *q)
/*
* Wait for a free request to become available if necessary, then
* return it.
+ * May be called in any AioContext.
*/
static coroutine_fn NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
{
@@ -328,20 +336,21 @@ static coroutine_fn NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
while (q->free_req_head == -1) {
trace_nvme_free_req_queue_wait(q->s, q->index);
+ /* nvme_free_req_queue_cb() wakes us in our own AioContext */
qemu_co_queue_wait(&q->free_req_queue, &q->lock);
}
return nvme_get_free_req_nofail_locked(q);
}
-/* With q->lock */
+/* With q->lock, may be called in any AioContext */
static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)
{
req->free_req_next = q->free_req_head;
q->free_req_head = req - q->reqs;
}
-/* With q->lock */
+/* With q->lock, may be called in any AioContext */
static void nvme_wake_free_req_locked(NVMeQueuePair *q)
{
if (!qemu_co_queue_empty(&q->free_req_queue)) {
@@ -350,7 +359,7 @@ static void nvme_wake_free_req_locked(NVMeQueuePair *q)
}
}
-/* Insert a request in the freelist and wake waiters */
+/* Insert a request in the freelist and wake waiters (from any AioContext) */
static void nvme_put_free_req_and_wake(NVMeQueuePair *q, NVMeRequest *req)
{
qemu_mutex_lock(&q->lock);
@@ -381,7 +390,7 @@ static inline int nvme_translate_error(const NvmeCqe *c)
}
}
-/* With q->lock */
+/* With q->lock, must be run in the BDS's main AioContext */
static bool nvme_process_completion(NVMeQueuePair *q)
{
BDRVNVMeState *s = q->s;
@@ -451,6 +460,7 @@ static bool nvme_process_completion(NVMeQueuePair *q)
return progress;
}
+/* As q->completion_bh, runs in the BDS's main AioContext */
static void nvme_process_completion_bh(void *opaque)
{
NVMeQueuePair *q = opaque;
@@ -481,6 +491,7 @@ static void nvme_trace_command(const NvmeCmd *cmd)
}
}
+/* Must be run in the BDS's main AioContext */
static void nvme_kick_and_check_completions(void *opaque)
{
NVMeQueuePair *q = opaque;
@@ -490,6 +501,7 @@ static void nvme_kick_and_check_completions(void *opaque)
nvme_process_completion(q);
}
+/* Runs in nvme_submit_command()'s AioContext */
static void nvme_deferred_fn(void *opaque)
{
NVMeQueuePair *q = opaque;
@@ -502,6 +514,7 @@ static void nvme_deferred_fn(void *opaque)
}
}
+/* May be run in any AioContext */
static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
NvmeCmd *cmd, BlockCompletionFunc cb,
void *opaque)
@@ -523,6 +536,7 @@ static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
defer_call(nvme_deferred_fn, q);
}
+/* Put into NVMeRequest.cb, so runs in the BDS's main AioContext */
static void nvme_admin_cmd_sync_cb(void *opaque, int ret)
{
int *pret = opaque;
@@ -530,6 +544,7 @@ static void nvme_admin_cmd_sync_cb(void *opaque, int ret)
aio_wait_kick();
}
+/* Must be run in the BDS's or qemu's main AioContext */
static int nvme_admin_cmd_sync(BlockDriverState *bs, NvmeCmd *cmd)
{
BDRVNVMeState *s = bs->opaque;
@@ -638,6 +653,7 @@ out:
return ret;
}
+/* Must be run in the BDS's main AioContext */
static void nvme_poll_queue(NVMeQueuePair *q)
{
const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
@@ -660,6 +676,7 @@ static void nvme_poll_queue(NVMeQueuePair *q)
qemu_mutex_unlock(&q->lock);
}
+/* Must be run in the BDS's main AioContext */
static void nvme_poll_queues(BDRVNVMeState *s)
{
int i;
@@ -669,6 +686,7 @@ static void nvme_poll_queues(BDRVNVMeState *s)
}
}
+/* Run as an event notifier in the BDS's main AioContext */
static void nvme_handle_event(EventNotifier *n)
{
BDRVNVMeState *s = container_of(n, BDRVNVMeState,
@@ -722,6 +740,7 @@ out_error:
return false;
}
+/* Run as an event notifier in the BDS's main AioContext */
static bool nvme_poll_cb(void *opaque)
{
EventNotifier *e = opaque;
@@ -745,6 +764,7 @@ static bool nvme_poll_cb(void *opaque)
return false;
}
+/* Run as an event notifier in the BDS's main AioContext */
static void nvme_poll_ready(EventNotifier *e)
{
BDRVNVMeState *s = container_of(e, BDRVNVMeState,
@@ -1050,7 +1070,7 @@ static int nvme_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
return 0;
}
-/* Called with s->dma_map_lock */
+/* Called with s->dma_map_lock, may be run in any AioContext */
static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
QEMUIOVector *qiov)
{
@@ -1061,13 +1081,17 @@ static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
if (!s->dma_map_count && !qemu_co_queue_empty(&s->dma_flush_queue)) {
r = qemu_vfio_dma_reset_temporary(s->vfio);
if (!r) {
+ /*
+ * Queue access is protected by the dma_map_lock, and all
+ * coroutines are woken in their own AioContext
+ */
qemu_co_queue_restart_all(&s->dma_flush_queue);
}
}
return r;
}
-/* Called with s->dma_map_lock */
+/* Called with s->dma_map_lock, may be run in any AioContext */
static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
NVMeRequest *req, QEMUIOVector *qiov)
{
@@ -1180,6 +1204,7 @@ typedef struct {
int ret;
} NVMeCoData;
+/* Put into NVMeRequest.cb, so runs in the BDS's main AioContext */
static void nvme_rw_cb(void *opaque, int ret)
{
NVMeCoData *data = opaque;
--
2.51.1