From: Uday Shankar <ushankar@purestorage.com>
To: Ming Lei <ming.lei@redhat.com>, Jens Axboe <axboe@kernel.dk>,
Caleb Sander Mateos <csander@purestorage.com>,
Andrew Morton <akpm@linux-foundation.org>,
Shuah Khan <shuah@kernel.org>, Jonathan Corbet <corbet@lwn.net>
Cc: linux-block@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-kselftest@vger.kernel.org, linux-doc@vger.kernel.org,
Uday Shankar <ushankar@purestorage.com>
Subject: [PATCH v8 4/9] selftests: ublk: kublk: lift queue initialization out of thread
Date: Thu, 29 May 2025 17:47:13 -0600
Message-ID: <20250529-ublk_task_per_io-v8-4-e9d3b119336a@purestorage.com>
In-Reply-To: <20250529-ublk_task_per_io-v8-0-e9d3b119336a@purestorage.com>

Currently, each ublk server I/O handler thread initializes its own
queue. However, as we move towards decoupling ublk_queues from ublk
server threads, this model no longer makes sense: there will no longer
be a concept of a thread having "its own" queue. So lift queue
initialization out of the per-thread ublk_io_handler_fn and into a loop
in ublk_start_daemon (which runs once for each device).
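
For reference, after this patch the daemon-side loop looks roughly like
this (a sketch condensed from the hunks below, with error handling and
unrelated setup trimmed; not standalone code):

	for (i = 0; i < dinfo->nr_hw_queues; i++) {
		dev->q[i].dev = dev;
		dev->q[i].q_id = i;
		/* device-wide setup: cmd buffer mmap, per-io buffers */
		ret = ublk_queue_init(&dev->q[i], extra_flags);
		if (ret)
			goto fail;
		pthread_create(&dev->q[i].thread, NULL,
			       ublk_io_handler_fn, &qinfo[i]);
	}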

One part of ublk_queue_init (ring initialization) does actually need to
happen on the thread that will use the ring; that part is split out
into a new function, ublk_thread_init, which is still called by each
I/O handler thread.
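
Each I/O handler thread is then left with only the setup that must run
in thread context, roughly (again condensed from the hunks below):

	static void *ublk_io_handler_fn(void *data)
	{
		struct ublk_queue *q = ((struct ublk_queue_info *)data)->q;

		/* must run on this thread: gettid() and ring setup */
		if (ublk_thread_init(q))
			return NULL;
		/* ... process I/O on q->ring ... */
		ublk_thread_deinit(q);
		return NULL;
	}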

Signed-off-by: Uday Shankar <ushankar@purestorage.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
---
tools/testing/selftests/ublk/kublk.c | 68 +++++++++++++++++++++++++-----------
1 file changed, 47 insertions(+), 21 deletions(-)

diff --git a/tools/testing/selftests/ublk/kublk.c b/tools/testing/selftests/ublk/kublk.c
index 1602cf6f07a02e5dab293b91f301218c38c8f4d9..2d6d163b74483a07066f47fd36781782ce25a16e 100644
--- a/tools/testing/selftests/ublk/kublk.c
+++ b/tools/testing/selftests/ublk/kublk.c
@@ -412,6 +412,17 @@ static void ublk_queue_deinit(struct ublk_queue *q)
int i;
int nr_ios = q->q_depth;
+ if (q->io_cmd_buf)
+ munmap(q->io_cmd_buf, ublk_queue_cmd_buf_sz(q));
+
+ for (i = 0; i < nr_ios; i++)
+ free(q->ios[i].buf_addr);
+}
+
+static void ublk_thread_deinit(struct ublk_queue *q)
+{
+ q->tid = 0;
+
io_uring_unregister_buffers(&q->ring);
io_uring_unregister_ring_fd(&q->ring);
@@ -421,28 +432,20 @@ static void ublk_queue_deinit(struct ublk_queue *q)
close(q->ring.ring_fd);
q->ring.ring_fd = -1;
}
-
- if (q->io_cmd_buf)
- munmap(q->io_cmd_buf, ublk_queue_cmd_buf_sz(q));
-
- for (i = 0; i < nr_ios; i++)
- free(q->ios[i].buf_addr);
}

static int ublk_queue_init(struct ublk_queue *q, unsigned extra_flags)
{
struct ublk_dev *dev = q->dev;
int depth = dev->dev_info.queue_depth;
- int i, ret = -1;
+ int i;
int cmd_buf_size, io_buf_size;
unsigned long off;
- int ring_depth = dev->tgt.sq_depth, cq_depth = dev->tgt.cq_depth;
q->tgt_ops = dev->tgt.ops;
q->state = 0;
q->q_depth = depth;
q->cmd_inflight = 0;
- q->tid = gettid();
if (dev->dev_info.flags & (UBLK_F_SUPPORT_ZERO_COPY | UBLK_F_AUTO_BUF_REG)) {
q->state |= UBLKSRV_NO_BUF;
@@ -479,6 +482,22 @@ static int ublk_queue_init(struct ublk_queue *q, unsigned extra_flags)
}
}
+ return 0;
+ fail:
+ ublk_queue_deinit(q);
+ ublk_err("ublk dev %d queue %d failed\n",
+ dev->dev_info.dev_id, q->q_id);
+ return -ENOMEM;
+}
+
+static int ublk_thread_init(struct ublk_queue *q)
+{
+ struct ublk_dev *dev = q->dev;
+ int ring_depth = dev->tgt.sq_depth, cq_depth = dev->tgt.cq_depth;
+ int ret;
+
+ q->tid = gettid();
+
ret = ublk_setup_ring(&q->ring, ring_depth, cq_depth,
IORING_SETUP_COOP_TASKRUN |
IORING_SETUP_SINGLE_ISSUER |
@@ -508,9 +527,9 @@ static int ublk_queue_init(struct ublk_queue *q, unsigned extra_flags)
}
return 0;
- fail:
- ublk_queue_deinit(q);
- ublk_err("ublk dev %d queue %d failed\n",
+fail:
+ ublk_thread_deinit(q);
+ ublk_err("ublk dev %d queue %d thread init failed\n",
dev->dev_info.dev_id, q->q_id);
return -ENOMEM;
}
@@ -778,7 +797,6 @@ struct ublk_queue_info {
struct ublk_queue *q;
sem_t *queue_sem;
cpu_set_t *affinity;
- unsigned char auto_zc_fallback;
};

static void *ublk_io_handler_fn(void *data)
@@ -786,15 +804,11 @@ static void *ublk_io_handler_fn(void *data)
struct ublk_queue_info *info = data;
struct ublk_queue *q = info->q;
int dev_id = q->dev->dev_info.dev_id;
- unsigned extra_flags = 0;
int ret;
- if (info->auto_zc_fallback)
- extra_flags = UBLKSRV_AUTO_BUF_REG_FALLBACK;
-
- ret = ublk_queue_init(q, extra_flags);
+ ret = ublk_thread_init(q);
if (ret) {
- ublk_err("ublk dev %d queue %d init queue failed\n",
+ ublk_err("ublk dev %d queue %d thread init failed\n",
dev_id, q->q_id);
return NULL;
}
@@ -813,7 +827,7 @@ static void *ublk_io_handler_fn(void *data)
} while (1);
ublk_dbg(UBLK_DBG_QUEUE, "ublk dev %d queue %d exited\n", dev_id, q->q_id);
- ublk_queue_deinit(q);
+ ublk_thread_deinit(q);
return NULL;
}
@@ -857,6 +871,7 @@ static int ublk_start_daemon(const struct dev_ctx *ctx, struct ublk_dev *dev)
{
const struct ublksrv_ctrl_dev_info *dinfo = &dev->dev_info;
struct ublk_queue_info *qinfo;
+ unsigned extra_flags = 0;
cpu_set_t *affinity_buf;
void *thread_ret;
sem_t queue_sem;
@@ -878,14 +893,23 @@ static int ublk_start_daemon(const struct dev_ctx *ctx, struct ublk_dev *dev)
if (ret)
return ret;
+ if (ctx->auto_zc_fallback)
+ extra_flags = UBLKSRV_AUTO_BUF_REG_FALLBACK;
+
for (i = 0; i < dinfo->nr_hw_queues; i++) {
dev->q[i].dev = dev;
dev->q[i].q_id = i;
+ ret = ublk_queue_init(&dev->q[i], extra_flags);
+ if (ret) {
+ ublk_err("ublk dev %d queue %d init queue failed\n",
+ dinfo->dev_id, i);
+ goto fail;
+ }
+
qinfo[i].q = &dev->q[i];
qinfo[i].queue_sem = &queue_sem;
qinfo[i].affinity = &affinity_buf[i];
- qinfo[i].auto_zc_fallback = ctx->auto_zc_fallback;
pthread_create(&dev->q[i].thread, NULL,
ublk_io_handler_fn,
&qinfo[i]);
@@ -918,6 +942,8 @@ static int ublk_start_daemon(const struct dev_ctx *ctx, struct ublk_dev *dev)
for (i = 0; i < dinfo->nr_hw_queues; i++)
pthread_join(dev->q[i].thread, &thread_ret);
fail:
+ for (i = 0; i < dinfo->nr_hw_queues; i++)
+ ublk_queue_deinit(&dev->q[i]);
ublk_dev_unprep(dev);
ublk_dbg(UBLK_DBG_DEV, "%s exit\n", __func__);
--
2.34.1