io-uring.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCHv5 4/4] Add mixed sqe test for uring commands
  2025-10-13 18:00 [PATCHv5 0/4] liburing: support for mix sized sqe's Keith Busch
@ 2025-10-13 18:00 ` Keith Busch
  0 siblings, 0 replies; 3+ messages in thread
From: Keith Busch @ 2025-10-13 18:00 UTC (permalink / raw)
  To: io-uring, axboe, csander; +Cc: Keith Busch

From: Keith Busch <kbusch@kernel.org>

Signed-off-by: Keith Busch <kbusch@kernel.org>
---
 test/Makefile              |   1 +
 test/sqe-mixed-uring_cmd.c | 140 +++++++++++++++++++++++++++++++++++++
 2 files changed, 141 insertions(+)

diff --git a/test/Makefile b/test/Makefile
index 2c250c81..2b2e3967 100644
--- a/test/Makefile
+++ b/test/Makefile
@@ -236,6 +236,7 @@ test_srcs := \
 	sq-space_left.c \
 	sqe-mixed-nop.c \
 	sqe-mixed-bad-wrap.c \
+	sqe-mixed-uring_cmd.c \
 	sqwait.c \
 	stdout.c \
 	submit-and-wait.c \
diff --git a/test/sqe-mixed-uring_cmd.c b/test/sqe-mixed-uring_cmd.c
index e69de29b..7ac5f4ab 100644
--- a/test/sqe-mixed-uring_cmd.c
+++ b/test/sqe-mixed-uring_cmd.c
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: mixed sqes utilizing basic nop and io_uring passthrough commands
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "helpers.h"
+#include "liburing.h"
+#include "nvme.h"
+
+#define len 0x1000
+static unsigned char buf[len];
+static int seq;
+
+static int test_single_nop(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int ret;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		return T_EXIT_FAIL;
+	}
+
+	io_uring_prep_nop(sqe);
+	sqe->user_data = ++seq;
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "sqe submit failed: %d\n", ret);
+		return T_EXIT_FAIL;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0)
+		fprintf(stderr, "wait completion %d\n", ret);
+	else if (cqe->user_data != seq)
+		fprintf(stderr, "Unexpected user_data: %ld\n", (long) cqe->user_data);
+	else {
+		io_uring_cqe_seen(ring, cqe);
+		return T_EXIT_PASS;
+	}
+	return T_EXIT_FAIL;
+}
+
+static int test_single_nvme_read(struct io_uring *ring, int fd)
+{
+	struct nvme_uring_cmd *cmd;
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int ret;
+
+	sqe = io_uring_get_sqe128_mixed(ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		return T_EXIT_FAIL;
+	}
+
+	io_uring_prep_uring_cmd128(sqe, NVME_URING_CMD_IO, fd);
+	sqe->user_data = ++seq;
+
+	cmd = (struct nvme_uring_cmd *)sqe->cmd;
+	memset(cmd, 0, sizeof(struct nvme_uring_cmd));
+	cmd->opcode = nvme_cmd_read;
+	cmd->cdw12 = (len >> lba_shift) - 1;
+	cmd->addr = (__u64)(uintptr_t)buf;
+	cmd->data_len = len;
+	cmd->nsid = nsid;
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "sqe submit failed: %d\n", ret);
+		return T_EXIT_FAIL;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0)
+		fprintf(stderr, "wait completion %d\n", ret);
+	else if (cqe->res != 0)
+		fprintf(stderr, "cqe res %d, wanted 0\n", cqe->res);
+	else if (cqe->user_data != seq)
+		fprintf(stderr, "Unexpected user_data: %ld\n", (long) cqe->user_data);
+	else {
+		io_uring_cqe_seen(ring, cqe);
+		return T_EXIT_PASS;
+	}
+	return T_EXIT_FAIL;
+}
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	int fd, ret, i;
+
+	if (argc < 2)
+		return T_EXIT_SKIP;
+
+	ret = nvme_get_info(argv[1]);
+	if (ret)
+		return T_EXIT_SKIP;
+
+	fd = open(argv[1], O_RDONLY);
+	if (fd < 0) {
+		if (errno == EACCES || errno == EPERM)
+			return T_EXIT_SKIP;
+		perror("file open");
+		return T_EXIT_FAIL;
+	}
+
+	ret = io_uring_queue_init(8, &ring,
+		IORING_SETUP_CQE_MIXED | IORING_SETUP_SQE_MIXED);
+	if (ret) {
+		if (ret == -EINVAL)
+			ret = T_EXIT_SKIP;
+		else {
+			fprintf(stderr, "ring setup failed: %d\n", ret);
+			ret = T_EXIT_FAIL;
+		}
+		goto close;
+	}
+
+	for (i = 0; i < 32; i++) {
+		if (i & 1)
+			ret = test_single_nvme_read(&ring, fd);
+		else
+			ret = test_single_nop(&ring);
+
+		if (ret)
+			break;
+	}
+
+	io_uring_queue_exit(&ring);
+close:
+	close(fd);
+	return ret;
+}
-- 
2.47.3


^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Re: [PATCHv5 2/4] Add support IORING_SETUP_SQE_MIXED
       [not found] ` <20251021213329.784558-3-kbusch@meta.com>
@ 2025-10-21 22:08   ` Jens Axboe
  0 siblings, 0 replies; 3+ messages in thread
From: Jens Axboe @ 2025-10-21 22:08 UTC (permalink / raw)
  To: Keith Busch, csander, io-uring; +Cc: Keith Busch

(Adding right list)

On 10/21/25 3:33 PM, Keith Busch wrote:
> +/*
> + * Return a 128B sqe to fill. Applications must later call io_uring_submit()
> + * when it's ready to tell the kernel about it. The caller may call this
> + * function multiple times before calling io_uring_submit().
> + *
> + * Returns a vacant 128B sqe, or NULL if we're full. If the current tail is the
> + * last entry in the ring, this function will insert a nop + skip complete such
> + * that the 128b entry wraps back to the beginning of the queue for a
> + * contiguous big sq entry. It's up to the caller to use a 128b opcode in order
> + * for the kernel to know how to advance its sq head pointer.
> + */
> +IOURINGINLINE struct io_uring_sqe *io_uring_get_sqe128_mixed(struct io_uring *ring)
> +	LIBURING_NOEXCEPT
> +{

I would probably just name this io_uring_get_sqe128() and have it work
for both MIXED and SQE128. That would make for a cleaner API for the
application.

> +	struct io_uring_sq *sq = &ring->sq;
> +	unsigned head = io_uring_load_sq_head(ring), tail = sq->sqe_tail;
> +	struct io_uring_sqe *sqe;
> +
> +	if (!(ring->flags & IORING_SETUP_SQE_MIXED))
> +		return NULL;
> +
> +	if (((tail + 1) & sq->ring_mask) == 0) {
> +		if ((tail + 2) - head >= sq->ring_entries)
> +			return NULL;
> +
> +		sqe = _io_uring_get_sqe(ring);
> +		io_uring_prep_nop(sqe);
> +		sqe->flags |= IOSQE_CQE_SKIP_SUCCESS;
> +		tail = sq->sqe_tail;
> +	} else if ((tail + 1) - head >= sq->ring_entries) {
> +		return NULL;
> +	}
> +
> +	sqe = &sq->sqes[tail & sq->ring_mask];
> +	sq->sqe_tail = tail + 2;
> +	io_uring_initialize_sqe(sqe);
> +
> +	return sqe;
> +}

Spurious newline before return, just group them.

-- 
Jens Axboe

^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [PATCHv5 4/4] Add mixed sqe test for uring commands
       [not found] ` <20251021213329.784558-5-kbusch@meta.com>
@ 2025-10-21 22:09   ` Jens Axboe
  0 siblings, 0 replies; 3+ messages in thread
From: Jens Axboe @ 2025-10-21 22:09 UTC (permalink / raw)
  To: Keith Busch, csander, io-uring; +Cc: Keith Busch

> +	ret = io_uring_wait_cqe(ring, &cqe);
> +	if (ret < 0)
> +		fprintf(stderr, "wait completion %d\n", ret);
> +	else if (cqe->user_data != seq)
> +		fprintf(stderr, "Unexpected user_data: %ld\n", (long) cqe->user_data);
> +	else {
> +		io_uring_cqe_seen(ring, cqe);
> +		return T_EXIT_PASS;
> +	}
> +	return T_EXIT_FAIL;

All braces if one has braces. In a few different spots.

Outside of those little nits and the previous comment, I think this is
looking fine.

-- 
Jens Axboe

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2025-10-21 22:09 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
     [not found] <20251021213329.784558-1-kbusch@meta.com>
     [not found] ` <20251021213329.784558-3-kbusch@meta.com>
2025-10-21 22:08   ` [PATCHv5 2/4] Add support IORING_SETUP_SQE_MIXED Jens Axboe
     [not found] ` <20251021213329.784558-5-kbusch@meta.com>
2025-10-21 22:09   ` [PATCHv5 4/4] Add mixed sqe test for uring commands Jens Axboe
2025-10-13 18:00 [PATCHv5 0/4] liburing: support for mix sized sqe's Keith Busch
2025-10-13 18:00 ` [PATCHv5 4/4] Add mixed sqe test for uring commands Keith Busch

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).