* [LTP] [PATCH v2 1/3] io_uring: Test IORING READ and WRITE operations
2026-03-24 5:22 [LTP] [PATCH v2 0/3] io_uring READ(V), WRITE(V) operation tests Sachin Sant
@ 2026-03-24 5:22 ` Sachin Sant
2026-03-24 5:22 ` [LTP] [PATCH v2 2/3] io_uring: Test READV and WRITEV operations Sachin Sant
2026-03-24 5:22 ` [LTP] [PATCH v2 3/3] io_uring: Refactor io_uring01 to use common code Sachin Sant
2 siblings, 0 replies; 4+ messages in thread
From: Sachin Sant @ 2026-03-24 5:22 UTC (permalink / raw)
To: ltp
This test validates basic read and write operations using io_uring.
It tests:
1. IORING_OP_WRITE - Writing data to a file
2. IORING_OP_READ - Reading data from a file
3. Data integrity verification
This patch also introduces a header file for common functions.
Signed-off-by: Sachin Sant <sachinp@linux.ibm.com>
---
runtest/syscalls | 1 +
testcases/kernel/syscalls/io_uring/.gitignore | 1 +
.../kernel/syscalls/io_uring/io_uring03.c | 140 +++++++++
.../syscalls/io_uring/io_uring_common.h | 278 ++++++++++++++++++
4 files changed, 420 insertions(+)
create mode 100644 testcases/kernel/syscalls/io_uring/io_uring03.c
create mode 100644 testcases/kernel/syscalls/io_uring/io_uring_common.h
diff --git a/runtest/syscalls b/runtest/syscalls
index 2179e007c..7dc80fe29 100644
--- a/runtest/syscalls
+++ b/runtest/syscalls
@@ -1898,6 +1898,7 @@ membarrier01 membarrier01
io_uring01 io_uring01
io_uring02 io_uring02
+io_uring03 io_uring03
# Tests below may cause kernel memory leak
perf_event_open03 perf_event_open03
diff --git a/testcases/kernel/syscalls/io_uring/.gitignore b/testcases/kernel/syscalls/io_uring/.gitignore
index 749db17db..9382ae413 100644
--- a/testcases/kernel/syscalls/io_uring/.gitignore
+++ b/testcases/kernel/syscalls/io_uring/.gitignore
@@ -1,2 +1,3 @@
/io_uring01
/io_uring02
+/io_uring03
diff --git a/testcases/kernel/syscalls/io_uring/io_uring03.c b/testcases/kernel/syscalls/io_uring/io_uring03.c
new file mode 100644
index 000000000..645c96b02
--- /dev/null
+++ b/testcases/kernel/syscalls/io_uring/io_uring03.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2026 IBM
+ * Author: Sachin Sant <sachinp@linux.ibm.com>
+ */
+/*
+ * Test IORING_OP_READ and IORING_OP_WRITE operations.
+ *
+ * This test validates basic read and write operations using io_uring.
+ * It tests:
+ * 1. IORING_OP_WRITE - Writing data to a file
+ * 2. IORING_OP_READ - Reading data from a file
+ * 3. Data integrity verification
+ */
+
+#include "io_uring_common.h"
+
#define TEST_FILE "io_uring_test_file"
#define QUEUE_DEPTH 2
#define BLOCK_SZ 4096

/* Guarded test buffers allocated by the LTP library (see .bufs below) */
static char *write_buf;
static char *read_buf;
/* Single io_uring instance shared by all subtests */
static struct io_uring_submit s;
/* Empty signal mask passed through to io_uring_enter() */
static sigset_t sig;
+
+static void init_buffer(char start_char)
+{
+ size_t i;
+
+ for (i = 0; i < BLOCK_SZ; i++)
+ write_buf[i] = start_char + (i % 26);
+}
+
+static void verify_data_integrity(const char *test_name)
+{
+ size_t i;
+
+ if (memcmp(write_buf, read_buf, BLOCK_SZ) == 0) {
+ tst_res(TPASS, "%s data integrity verified", test_name);
+ } else {
+ tst_res(TFAIL, "%s data mismatch", test_name);
+ for (i = 0; i < BLOCK_SZ && i < 64; i++) {
+ if (write_buf[i] != read_buf[i]) {
+ tst_res(TINFO, "First mismatch at offset %zu: "
+ "wrote 0x%02x, read 0x%02x",
+ i, write_buf[i], read_buf[i]);
+ break;
+ }
+ }
+ }
+}
+
/*
 * Write one BLOCK_SZ block with IORING_OP_WRITE, read it back with
 * IORING_OP_READ and verify the data round-trips intact.
 */
static void test_write_read(void)
{
	int fd;

	init_buffer('A');

	fd = SAFE_OPEN(TEST_FILE, O_RDWR | O_CREAT | O_TRUNC, 0644);

	tst_res(TINFO, "Testing IORING_OP_WRITE");
	io_uring_do_io_op(&s, fd, IORING_OP_WRITE, write_buf, BLOCK_SZ, 0,
			  &sig);

	/* Make sure the data reaches the file before reading it back */
	SAFE_FSYNC(fd);

	tst_res(TINFO, "Testing IORING_OP_READ");
	memset(read_buf, 0, BLOCK_SZ);
	io_uring_do_io_op(&s, fd, IORING_OP_READ, read_buf, BLOCK_SZ, 0,
			  &sig);

	verify_data_integrity("Basic I/O");

	SAFE_CLOSE(fd);
}
+
/*
 * Split the block into two half-sized writes at offsets 0 and
 * BLOCK_SZ / 2, then read the whole block back in one operation and
 * verify the reassembled contents.
 */
static void test_partial_io(void)
{
	int fd;
	size_t half = BLOCK_SZ / 2;

	tst_res(TINFO, "Testing partial I/O operations");

	init_buffer('a');

	fd = SAFE_OPEN(TEST_FILE, O_RDWR | O_CREAT | O_TRUNC, 0644);

	/* First half at offset 0 ... */
	io_uring_do_io_op(&s, fd, IORING_OP_WRITE, write_buf, half, 0,
			  &sig);

	/* ... second half at offset 'half' */
	io_uring_do_io_op(&s, fd, IORING_OP_WRITE, write_buf + half, half,
			  half, &sig);

	SAFE_FSYNC(fd);

	memset(read_buf, 0, BLOCK_SZ);
	io_uring_do_io_op(&s, fd, IORING_OP_READ, read_buf, BLOCK_SZ, 0,
			  &sig);

	verify_data_integrity("Partial I/O");

	SAFE_CLOSE(fd);
}
+
/* Run both subtests against a fresh file each iteration */
static void run(void)
{
	test_write_read();
	test_partial_io();
}

/* TCONF on kernels without io_uring, then set up one shared ring */
static void setup(void)
{
	io_uring_setup_supported_by_kernel();
	sigemptyset(&sig);
	memset(&s, 0, sizeof(s));
	io_uring_setup_queue(&s, QUEUE_DEPTH, 0);
}

static void cleanup(void)
{
	io_uring_cleanup_queue(&s, QUEUE_DEPTH);
}

static struct tst_test test = {
	.test_all = run,
	.setup = setup,
	.cleanup = cleanup,
	.needs_tmpdir = 1,
	/* write_buf/read_buf are guarded buffers allocated by the library */
	.bufs = (struct tst_buffers []) {
		{&write_buf, .size = BLOCK_SZ},
		{&read_buf, .size = BLOCK_SZ},
		{}
	},
	/* Re-enable io_uring if the sysctl has disabled it */
	.save_restore = (const struct tst_path_val[]) {
		{"/proc/sys/kernel/io_uring_disabled", "0",
			TST_SR_SKIP_MISSING | TST_SR_TCONF_RO},
		{}
	}
};
diff --git a/testcases/kernel/syscalls/io_uring/io_uring_common.h b/testcases/kernel/syscalls/io_uring/io_uring_common.h
new file mode 100644
index 000000000..aa31339fb
--- /dev/null
+++ b/testcases/kernel/syscalls/io_uring/io_uring_common.h
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2026 IBM
+ * Author: Sachin Sant <sachinp@linux.ibm.com>
+ *
+ * Common definitions and helper functions for io_uring tests
+ */
+
+#ifndef IO_URING_COMMON_H
+#define IO_URING_COMMON_H
+
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include "config.h"
+#include "tst_test.h"
+#include "lapi/io_uring.h"
+
/* Common structures for io_uring ring management */

/* Userspace view of the SQ ring: pointers into the mmapped SQ ring area */
struct io_sq_ring {
	unsigned int *head;
	unsigned int *tail;
	unsigned int *ring_mask;
	unsigned int *ring_entries;
	unsigned int *flags;
	unsigned int *array;	/* indirection array of SQE indices */
};

/* Userspace view of the CQ ring: pointers into the mmapped CQ ring area */
struct io_cq_ring {
	unsigned int *head;
	unsigned int *tail;
	unsigned int *ring_mask;
	unsigned int *ring_entries;
	struct io_uring_cqe *cqes;	/* completion entries live in the mapping */
};

/* Per-instance io_uring state: ring fd plus all three mappings */
struct io_uring_submit {
	int ring_fd;
	struct io_sq_ring sq_ring;
	struct io_uring_sqe *sqes;	/* mmapped SQE array */
	struct io_cq_ring cq_ring;
	void *sq_ptr;			/* SQ ring mapping, kept for munmap */
	size_t sq_ptr_size;
	void *cq_ptr;			/* CQ ring mapping, kept for munmap */
	size_t cq_ptr_size;
};
+
/*
 * Set up an io_uring instance with the given queue depth and setup flags
 * and mmap its SQ ring, SQE array and CQ ring.
 *
 * Returns 0 on success.  On failure the test is aborted via tst_brk()
 * (or by SAFE_MMAP()), so callers will normally never observe the -1
 * return; it is kept only as a defensive fallback.
 */
static inline int io_uring_setup_queue(struct io_uring_submit *s,
				       unsigned int queue_depth,
				       unsigned int flags)
{
	struct io_sq_ring *sring = &s->sq_ring;
	struct io_cq_ring *cring = &s->cq_ring;
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));
	p.flags = flags;
	s->ring_fd = io_uring_setup(queue_depth, &p);
	if (s->ring_fd < 0) {
		tst_brk(TBROK | TERRNO, "io_uring_setup() failed");
		return -1;
	}

	/* Ring offsets/sizes are filled in by the kernel in p.sq_off/p.cq_off */
	s->sq_ptr_size = p.sq_off.array + p.sq_entries * sizeof(unsigned int);

	s->sq_ptr = SAFE_MMAP(0, s->sq_ptr_size, PROT_READ | PROT_WRITE,
			      MAP_SHARED | MAP_POPULATE, s->ring_fd,
			      IORING_OFF_SQ_RING);

	/* Save submission queue pointers */
	sring->head = s->sq_ptr + p.sq_off.head;
	sring->tail = s->sq_ptr + p.sq_off.tail;
	sring->ring_mask = s->sq_ptr + p.sq_off.ring_mask;
	sring->ring_entries = s->sq_ptr + p.sq_off.ring_entries;
	sring->flags = s->sq_ptr + p.sq_off.flags;
	sring->array = s->sq_ptr + p.sq_off.array;

	/* SQE array is a separate mapping from the SQ ring itself */
	s->sqes = SAFE_MMAP(0, p.sq_entries * sizeof(struct io_uring_sqe),
			    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			    s->ring_fd, IORING_OFF_SQES);

	s->cq_ptr_size = p.cq_off.cqes +
			 p.cq_entries * sizeof(struct io_uring_cqe);

	s->cq_ptr = SAFE_MMAP(0, s->cq_ptr_size, PROT_READ | PROT_WRITE,
			      MAP_SHARED | MAP_POPULATE, s->ring_fd,
			      IORING_OFF_CQ_RING);

	/* Save completion queue pointers */
	cring->head = s->cq_ptr + p.cq_off.head;
	cring->tail = s->cq_ptr + p.cq_off.tail;
	cring->ring_mask = s->cq_ptr + p.cq_off.ring_mask;
	cring->ring_entries = s->cq_ptr + p.cq_off.ring_entries;
	cring->cqes = s->cq_ptr + p.cq_off.cqes;

	return 0;
}
+
+/*
+ * Cleanup io_uring instance and unmap all memory regions
+ */
+static inline void io_uring_cleanup_queue(struct io_uring_submit *s,
+ unsigned int queue_depth)
+{
+ if (s->sqes)
+ SAFE_MUNMAP(s->sqes, queue_depth * sizeof(struct io_uring_sqe));
+ if (s->cq_ptr)
+ SAFE_MUNMAP(s->cq_ptr, s->cq_ptr_size);
+ if (s->sq_ptr)
+ SAFE_MUNMAP(s->sq_ptr, s->sq_ptr_size);
+ if (s->ring_fd > 0)
+ SAFE_CLOSE(s->ring_fd);
+}
+
+/*
+ * Internal helper to submit a single SQE to the submission queue
+ * Used by both vectored and non-vectored I/O operations
+ */
+static inline void io_uring_submit_sqe_internal(struct io_uring_submit *s,
+ int fd, int opcode,
+ unsigned long addr,
+ unsigned int len,
+ off_t offset,
+ unsigned long long user_data)
+{
+ struct io_sq_ring *sring = &s->sq_ring;
+ unsigned int tail, index;
+ struct io_uring_sqe *sqe;
+
+ tail = *sring->tail;
+ index = tail & *sring->ring_mask;
+ sqe = &s->sqes[index];
+
+ memset(sqe, 0, sizeof(*sqe));
+ sqe->opcode = opcode;
+ sqe->fd = fd;
+ sqe->addr = addr;
+ sqe->len = len;
+ sqe->off = offset;
+ sqe->user_data = user_data;
+
+ sring->array[index] = index;
+ tail++;
+
+ *sring->tail = tail;
+}
+
/*
 * Queue a plain (non-vectored) read/write SQE.  The opcode doubles as
 * the completion's user_data tag.
 */
static inline void io_uring_submit_sqe(struct io_uring_submit *s, int fd,
				       int opcode, void *buf, size_t len,
				       off_t offset)
{
	unsigned long buf_addr = (unsigned long)buf;

	io_uring_submit_sqe_internal(s, fd, opcode, buf_addr, len, offset,
				     opcode);
}
+
/*
 * Queue a vectored (readv/writev style) SQE.  The opcode doubles as the
 * completion's user_data tag.
 */
static inline void io_uring_submit_sqe_vec(struct io_uring_submit *s, int fd,
					   int opcode, struct iovec *iovs,
					   int nr_vecs, off_t offset)
{
	unsigned long vec_addr = (unsigned long)iovs;

	io_uring_submit_sqe_internal(s, fd, opcode, vec_addr, nr_vecs,
				     offset, opcode);
}
+
/*
 * Map the io_uring opcodes exercised by these tests to printable names;
 * anything else yields "UNKNOWN".
 */
static inline const char *ioring_op_name(int op)
{
	static const struct {
		int op;
		const char *name;
	} names[] = {
		{ IORING_OP_READV, "IORING_OP_READV" },
		{ IORING_OP_WRITEV, "IORING_OP_WRITEV" },
		{ IORING_OP_READ, "IORING_OP_READ" },
		{ IORING_OP_WRITE, "IORING_OP_WRITE" },
	};
	size_t i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		if (names[i].op == op)
			return names[i].name;
	}

	return "UNKNOWN";
}
+
/*
 * Wait for one completion and check that it matches the submitted
 * request (user_data carries the opcode) and finished with the expected
 * result.  Any deviation aborts the test via tst_brk().
 *
 * NOTE(review): head/tail are accessed with plain loads/stores; on
 * weakly ordered architectures acquire/release barriers are normally
 * required for the shared rings — confirm this is acceptable for the
 * single-threaded submit-then-wait pattern these tests use.
 */
static inline void io_uring_wait_cqe(struct io_uring_submit *s,
				     int expected_res, int expected_opcode,
				     sigset_t *sig)
{
	struct io_cq_ring *cring = &s->cq_ring;
	struct io_uring_cqe *cqe;
	unsigned int head;
	int ret;

	/* Submit one SQE and block until at least one CQE is available */
	ret = io_uring_enter(s->ring_fd, 1, 1, IORING_ENTER_GETEVENTS, sig);
	if (ret < 0)
		tst_brk(TBROK | TERRNO, "io_uring_enter() failed");

	head = *cring->head;
	if (head == *cring->tail)
		tst_brk(TBROK, "No completion event received");

	cqe = &cring->cqes[head & *cring->ring_mask];

	/* Consume the CQE even on the error paths before aborting */
	if (cqe->user_data != (uint64_t)expected_opcode) {
		*cring->head = head + 1;
		tst_brk(TBROK, "Unexpected user_data: got %llu, expected %d",
			(unsigned long long)cqe->user_data, expected_opcode);
	}

	if (cqe->res != expected_res) {
		*cring->head = head + 1;
		tst_brk(TBROK, "Operation failed: res=%d, expected=%d",
			cqe->res, expected_res);
	}

	*cring->head = head + 1;
}
+
/*
 * Fill buf with size copies of pattern — handy for creating test data
 * with a predictable content.
 */
static inline void io_uring_init_buffer_pattern(char *buf, size_t size,
						char pattern)
{
	char *end = buf + size;

	while (buf < end)
		*buf++ = pattern;
}
+
/*
 * Submit one non-vectored read/write, wait for its completion and report
 * the operation.  io_uring_wait_cqe() aborts the test on any failure or
 * short transfer, so reaching the tst_res() below implies the full len
 * bytes were transferred.
 */
static inline void io_uring_do_io_op(struct io_uring_submit *s, int fd,
				     int op, void *buf, size_t len,
				     off_t offset, sigset_t *sig)
{
	io_uring_submit_sqe(s, fd, op, buf, len, offset);
	/* len narrows to int here; test sizes stay well below INT_MAX */
	io_uring_wait_cqe(s, len, op, sig);
	tst_res(TPASS, "OP=%s (%02x) fd=%i buf=%p len=%zu offset=%jd",
		ioring_op_name(op), op, fd, buf, len, (intmax_t)offset);
}
+
/*
 * Submit one vectored (readv/writev) operation, wait for its completion
 * and report it.  expected_size is the total byte count across all
 * iovecs; io_uring_wait_cqe() aborts the test on failure or short
 * transfer, so reaching the tst_res() below implies full completion.
 */
static inline void io_uring_do_vec_io_op(struct io_uring_submit *s, int fd,
					 int op, struct iovec *iovs,
					 int nvecs, off_t offset,
					 int expected_size, sigset_t *sig)
{
	io_uring_submit_sqe_vec(s, fd, op, iovs, nvecs, offset);
	io_uring_wait_cqe(s, expected_size, op, sig);
	tst_res(TPASS, "OP=%s (%02x) fd=%i iovs=%p nvecs=%i offset=%jd "
		"expected_size=%i",
		ioring_op_name(op), op, fd, iovs, nvecs, (intmax_t)offset,
		expected_size);
}
+
+#endif /* IO_URING_COMMON_H */
--
2.39.1
--
Mailing list info: https://lists.linux.it/listinfo/ltp
^ permalink raw reply related [flat|nested] 4+ messages in thread* [LTP] [PATCH v2 2/3] io_uring: Test READV and WRITEV operations
2026-03-24 5:22 [LTP] [PATCH v2 0/3] io_uring READ(V), WRITE(V) operation tests Sachin Sant
2026-03-24 5:22 ` [LTP] [PATCH v2 1/3] io_uring: Test IORING READ and WRITE operations Sachin Sant
@ 2026-03-24 5:22 ` Sachin Sant
2026-03-24 5:22 ` [LTP] [PATCH v2 3/3] io_uring: Refactor io_uring01 to use common code Sachin Sant
2 siblings, 0 replies; 4+ messages in thread
From: Sachin Sant @ 2026-03-24 5:22 UTC (permalink / raw)
To: ltp
This test validates vectored read and write operations using io_uring.
It tests:
1. IORING_OP_WRITEV - Writing data using multiple buffers (scatter)
2. IORING_OP_READV - Reading data into multiple buffers (gather)
3. Data integrity verification across multiple iovecs
4. Edge cases with different iovec configurations including zero
buffer length
Signed-off-by: Sachin Sant <sachinp@linux.ibm.com>
---
runtest/syscalls | 1 +
testcases/kernel/syscalls/io_uring/.gitignore | 1 +
.../kernel/syscalls/io_uring/io_uring04.c | 216 ++++++++++++++++++
3 files changed, 218 insertions(+)
create mode 100644 testcases/kernel/syscalls/io_uring/io_uring04.c
diff --git a/runtest/syscalls b/runtest/syscalls
index 7dc80fe29..eacf946c5 100644
--- a/runtest/syscalls
+++ b/runtest/syscalls
@@ -1899,6 +1899,7 @@ membarrier01 membarrier01
io_uring01 io_uring01
io_uring02 io_uring02
io_uring03 io_uring03
+io_uring04 io_uring04
# Tests below may cause kernel memory leak
perf_event_open03 perf_event_open03
diff --git a/testcases/kernel/syscalls/io_uring/.gitignore b/testcases/kernel/syscalls/io_uring/.gitignore
index 9382ae413..36cd24662 100644
--- a/testcases/kernel/syscalls/io_uring/.gitignore
+++ b/testcases/kernel/syscalls/io_uring/.gitignore
@@ -1,3 +1,4 @@
/io_uring01
/io_uring02
/io_uring03
+/io_uring04
diff --git a/testcases/kernel/syscalls/io_uring/io_uring04.c b/testcases/kernel/syscalls/io_uring/io_uring04.c
new file mode 100644
index 000000000..48479df1f
--- /dev/null
+++ b/testcases/kernel/syscalls/io_uring/io_uring04.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2026 IBM
+ * Author: Sachin Sant <sachinp@linux.ibm.com>
+ */
+/*
+ * Test IORING_OP_READV and IORING_OP_WRITEV operations.
+ *
+ * This test validates vectored read and write operations using io_uring.
+ * It tests:
+ * 1. IORING_OP_WRITEV - Writing data using multiple buffers (scatter)
+ * 2. IORING_OP_READV - Reading data into multiple buffers (gather)
+ * 3. Data integrity verification across multiple iovecs
+ * 4. Edge cases with different iovec configurations
+ */
+
+#include "io_uring_common.h"
+
#define TEST_FILE "io_uring_test_file"
#define QUEUE_DEPTH 2
/* Equal-sized vector set: NUM_VECS iovecs of VEC_SIZE bytes each */
#define NUM_VECS 4
#define VEC_SIZE 1024
/* Varying-size vector set; a zero-length iovec sits at index 1 (.bufs) */
#define VAR_BUF1_SIZE 512
#define VAR_BUF2_SIZE 1024
#define VAR_BUF3_SIZE 256

/* Guarded iovec arrays allocated by the LTP library (see .bufs below) */
static struct iovec *write_iovs, *read_iovs;
static struct iovec *var_write_iovs, *var_read_iovs;
static struct io_uring_submit s;
static sigset_t sig;
+
+static void prepare_write_buffers(void)
+{
+ size_t i, j;
+ char *buf;
+
+ for (i = 0; i < NUM_VECS; i++) {
+ buf = (char *)write_iovs[i].iov_base;
+ for (j = 0; j < write_iovs[i].iov_len; j++) {
+ /* Each vector has a different pattern */
+ buf[j] = 'A' + i + (j % 26);
+ }
+ }
+}
+
+static void prepare_read_buffers(void)
+{
+ size_t i;
+
+ for (i = 0; i < NUM_VECS; i++)
+ memset(read_iovs[i].iov_base, 0, read_iovs[i].iov_len);
+}
+
/* Zero the buffers behind the first nvecs entries of iovs */
static void clear_iovec_buffers(struct iovec *iovs, int nvecs)
{
	const struct iovec *end = iovs + nvecs;

	for (; iovs < end; iovs++)
		memset(iovs->iov_base, 0, iovs->iov_len);
}
+
+static void verify_iovec_data(struct iovec *write_iovs, struct iovec *read_iovs,
+ int nvecs, const char *test_name)
+{
+ int i;
+ size_t j;
+
+ for (i = 0; i < nvecs; i++) {
+ if (write_iovs[i].iov_len != read_iovs[i].iov_len) {
+ tst_res(TFAIL, "%s: iovec %d length mismatch: write=%zu read=%zu",
+ test_name, i, write_iovs[i].iov_len, read_iovs[i].iov_len);
+ return;
+ }
+
+ if (memcmp(write_iovs[i].iov_base, read_iovs[i].iov_base,
+ write_iovs[i].iov_len) != 0) {
+ tst_res(TFAIL, "%s: data mismatch in vector %d", test_name, i);
+ for (j = 0; j < write_iovs[i].iov_len && j < 64; j++) {
+ char *wbuf = (char *)write_iovs[i].iov_base;
+ char *rbuf = (char *)read_iovs[i].iov_base;
+ if (wbuf[j] != rbuf[j]) {
+ tst_res(TINFO, "Vector %d: first mismatch at "
+ "offset %zu: wrote 0x%02x, read 0x%02x",
+ i, j, wbuf[j], rbuf[j]);
+ break;
+ }
+ }
+ return;
+ }
+ }
+
+ tst_res(TPASS, "%s: data integrity verified across %d vectors",
+ test_name, nvecs);
+}
+
+
/*
 * Write NUM_VECS * VEC_SIZE bytes with a single IORING_OP_WRITEV, read
 * them back with a single IORING_OP_READV and verify every vector.
 */
static void test_writev_readv(void)
{
	int fd;
	int total_size = NUM_VECS * VEC_SIZE;

	tst_res(TINFO, "Testing IORING_OP_WRITEV and IORING_OP_READV");

	fd = SAFE_OPEN(TEST_FILE, O_RDWR | O_CREAT | O_TRUNC, 0644);

	tst_res(TINFO, "Writing %d bytes using %d vectors", total_size, NUM_VECS);
	io_uring_do_vec_io_op(&s, fd, IORING_OP_WRITEV, write_iovs, NUM_VECS,
			      0, total_size, &sig);

	/* Make sure the data reaches the file before reading it back */
	SAFE_FSYNC(fd);

	tst_res(TINFO, "Reading %d bytes using %d vectors", total_size, NUM_VECS);
	io_uring_do_vec_io_op(&s, fd, IORING_OP_READV, read_iovs, NUM_VECS,
			      0, total_size, &sig);

	verify_iovec_data(write_iovs, read_iovs, NUM_VECS, "Basic vectored I/O");

	SAFE_CLOSE(fd);
}
+
/*
 * Write the file in two halves, each with a 2-vector WRITEV, then read
 * the whole file back with one 4-vector READV and verify the result.
 */
static void test_partial_vectors(void)
{
	int fd;
	int half_size = 2 * VEC_SIZE;

	tst_res(TINFO, "Testing partial vector operations");

	fd = SAFE_OPEN(TEST_FILE, O_RDWR | O_CREAT | O_TRUNC, 0644);

	/* Write first half using first 2 vectors at offset 0 */
	io_uring_do_vec_io_op(&s, fd, IORING_OP_WRITEV, write_iovs, 2, 0,
			      half_size, &sig);

	/* Write second half using next 2 vectors at offset half_size */
	io_uring_do_vec_io_op(&s, fd, IORING_OP_WRITEV, &write_iovs[2], 2,
			      half_size, half_size, &sig);

	SAFE_FSYNC(fd);

	/* Read back entire file using all 4 vectors */
	io_uring_do_vec_io_op(&s, fd, IORING_OP_READV, read_iovs, NUM_VECS, 0,
			      NUM_VECS * VEC_SIZE, &sig);

	verify_iovec_data(write_iovs, read_iovs, NUM_VECS, "Partial vector I/O");

	SAFE_CLOSE(fd);
}
+
/*
 * Exercise WRITEV/READV with iovecs of unequal sizes.  Per the .bufs
 * iov_sizes below, index 1 is a zero-length iovec, so only indices 0,
 * 2 and 3 carry data; expected_size is the sum of the non-zero buffers.
 */
static void test_varying_sizes(void)
{
	int fd;
	int expected_size = VAR_BUF1_SIZE + VAR_BUF2_SIZE + VAR_BUF3_SIZE;

	tst_res(TINFO, "Testing vectors with varying sizes including zero-length buffer");

	io_uring_init_buffer_pattern(var_write_iovs[0].iov_base, VAR_BUF1_SIZE, 'X');
	io_uring_init_buffer_pattern(var_write_iovs[2].iov_base, VAR_BUF2_SIZE, 'Y');
	io_uring_init_buffer_pattern(var_write_iovs[3].iov_base, VAR_BUF3_SIZE, 'Z');

	clear_iovec_buffers(var_read_iovs, 4);

	fd = SAFE_OPEN(TEST_FILE, O_RDWR | O_CREAT | O_TRUNC, 0644);

	io_uring_do_vec_io_op(&s, fd, IORING_OP_WRITEV, var_write_iovs, 4, 0,
			      expected_size, &sig);

	SAFE_FSYNC(fd);

	io_uring_do_vec_io_op(&s, fd, IORING_OP_READV, var_read_iovs, 4, 0,
			      expected_size, &sig);

	verify_iovec_data(var_write_iovs, var_read_iovs, 4, "Varying size vector I/O with zero-length buffer");

	SAFE_CLOSE(fd);
}
+
/* Run all three vectored-I/O subtests against a fresh file each time */
static void run(void)
{
	test_writev_readv();
	test_partial_vectors();
	test_varying_sizes();
}

/* TCONF on kernels without io_uring, set up one ring, prime the buffers */
static void setup(void)
{
	io_uring_setup_supported_by_kernel();
	sigemptyset(&sig);
	memset(&s, 0, sizeof(s));
	io_uring_setup_queue(&s, QUEUE_DEPTH, 0);
	prepare_write_buffers();
	prepare_read_buffers();
}

static void cleanup(void)
{
	io_uring_cleanup_queue(&s, QUEUE_DEPTH);
}

static struct tst_test test = {
	.test_all = run,
	.setup = setup,
	.cleanup = cleanup,
	.needs_tmpdir = 1,
	/* Library-allocated guarded iovec arrays; -1 terminates iov_sizes,
	 * the 0 entry creates the zero-length iovec used by
	 * test_varying_sizes() */
	.bufs = (struct tst_buffers []) {
		{&write_iovs, .iov_sizes = (int[]){VEC_SIZE, VEC_SIZE, VEC_SIZE, VEC_SIZE, -1}},
		{&read_iovs, .iov_sizes = (int[]){VEC_SIZE, VEC_SIZE, VEC_SIZE, VEC_SIZE, -1}},
		{&var_write_iovs, .iov_sizes = (int[]){VAR_BUF1_SIZE, 0, VAR_BUF2_SIZE, VAR_BUF3_SIZE, -1}},
		{&var_read_iovs, .iov_sizes = (int[]){VAR_BUF1_SIZE, 0, VAR_BUF2_SIZE, VAR_BUF3_SIZE, -1}},
		{}
	},
	/* Re-enable io_uring if the sysctl has disabled it */
	.save_restore = (const struct tst_path_val[]) {
		{"/proc/sys/kernel/io_uring_disabled", "0",
			TST_SR_SKIP_MISSING | TST_SR_TCONF_RO},
		{}
	}
};
--
2.39.1
--
Mailing list info: https://lists.linux.it/listinfo/ltp
^ permalink raw reply related [flat|nested] 4+ messages in thread* [LTP] [PATCH v2 3/3] io_uring: Refactor io_uring01 to use common code
2026-03-24 5:22 [LTP] [PATCH v2 0/3] io_uring READ(V), WRITE(V) operation tests Sachin Sant
2026-03-24 5:22 ` [LTP] [PATCH v2 1/3] io_uring: Test IORING READ and WRITE operations Sachin Sant
2026-03-24 5:22 ` [LTP] [PATCH v2 2/3] io_uring: Test READV and WRITEV operations Sachin Sant
@ 2026-03-24 5:22 ` Sachin Sant
2 siblings, 0 replies; 4+ messages in thread
From: Sachin Sant @ 2026-03-24 5:22 UTC (permalink / raw)
To: ltp
No functional change.
Refactor io_uring01 test case to use
- common definitions from io_uring_common.h
- remove duplicate structure definitions
- Replace manual munmap/close calls with io_uring_cleanup_queue()
helper function
- Submit SQE using common helper
Signed-off-by: Sachin Sant <sachinp@linux.ibm.com>
---
.../kernel/syscalls/io_uring/io_uring01.c | 145 +++---------------
1 file changed, 22 insertions(+), 123 deletions(-)
diff --git a/testcases/kernel/syscalls/io_uring/io_uring01.c b/testcases/kernel/syscalls/io_uring/io_uring01.c
index ab1ec00d6..4c64e562b 100644
--- a/testcases/kernel/syscalls/io_uring/io_uring01.c
+++ b/testcases/kernel/syscalls/io_uring/io_uring01.c
@@ -11,13 +11,7 @@
* registered in the kernel for long term operation using io_uring_register().
* This tests initiates I/O operations with the help of io_uring_enter().
*/
-#include <stdlib.h>
-#include <errno.h>
-#include <string.h>
-#include <fcntl.h>
-#include "config.h"
-#include "tst_test.h"
-#include "lapi/io_uring.h"
+#include "io_uring_common.h"
#define TEST_FILE "test_file"
@@ -32,96 +26,19 @@ static struct tcase {
{0, IORING_REGISTER_BUFFERS, IORING_OP_READ_FIXED},
};
-struct io_sq_ring {
- unsigned int *head;
- unsigned int *tail;
- unsigned int *ring_mask;
- unsigned int *ring_entries;
- unsigned int *flags;
- unsigned int *array;
-};
-
-struct io_cq_ring {
- unsigned int *head;
- unsigned int *tail;
- unsigned int *ring_mask;
- unsigned int *ring_entries;
- struct io_uring_cqe *cqes;
-};
-
-struct submitter {
- int ring_fd;
- struct io_sq_ring sq_ring;
- struct io_uring_sqe *sqes;
- struct io_cq_ring cq_ring;
-};
-
-static struct submitter sub_ring;
-static struct submitter *s = &sub_ring;
+static struct io_uring_submit s;
static sigset_t sig;
static struct iovec *iov;
-
-static void *sptr;
-static size_t sptr_size;
-static void *cptr;
-static size_t cptr_size;
-
-static int setup_io_uring_test(struct submitter *s, struct tcase *tc)
+static int setup_io_uring_test(struct io_uring_submit *s, struct tcase *tc)
{
- struct io_sq_ring *sring = &s->sq_ring;
- struct io_cq_ring *cring = &s->cq_ring;
- struct io_uring_params p;
+ int ret;
- memset(&p, 0, sizeof(p));
- p.flags |= tc->setup_flags;
- s->ring_fd = io_uring_setup(QUEUE_DEPTH, &p);
- if (s->ring_fd != -1) {
+ ret = io_uring_setup_queue(s, QUEUE_DEPTH, tc->setup_flags);
+ if (ret == 0)
tst_res(TPASS, "io_uring_setup() passed");
- } else {
- tst_res(TFAIL | TERRNO, "io_uring_setup() failed");
- return 1;
- }
-
- sptr_size = p.sq_off.array + p.sq_entries * sizeof(unsigned int);
-
- /* Submission queue ring buffer mapping */
- sptr = SAFE_MMAP(0, sptr_size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_POPULATE,
- s->ring_fd, IORING_OFF_SQ_RING);
-
- /* Save global submission queue struct info */
- sring->head = sptr + p.sq_off.head;
- sring->tail = sptr + p.sq_off.tail;
- sring->ring_mask = sptr + p.sq_off.ring_mask;
- sring->ring_entries = sptr + p.sq_off.ring_entries;
- sring->flags = sptr + p.sq_off.flags;
- sring->array = sptr + p.sq_off.array;
-
- /* Submission queue entries ring buffer mapping */
- s->sqes = SAFE_MMAP(0, p.sq_entries *
- sizeof(struct io_uring_sqe),
- PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_POPULATE,
- s->ring_fd, IORING_OFF_SQES);
-
- cptr_size = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
-
- /* Completion queue ring buffer mapping */
- cptr = SAFE_MMAP(0, cptr_size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_POPULATE,
- s->ring_fd, IORING_OFF_CQ_RING);
-
- /* Save global completion queue struct info */
- cring->head = cptr + p.cq_off.head;
- cring->tail = cptr + p.cq_off.tail;
- cring->ring_mask = cptr + p.cq_off.ring_mask;
- cring->ring_entries = cptr + p.cq_off.ring_entries;
- cring->cqes = cptr + p.cq_off.cqes;
- return 0;
+ return ret;
}
static void check_buffer(char *buffer, size_t len)
@@ -139,7 +56,7 @@ static void check_buffer(char *buffer, size_t len)
tst_res(TPASS, "Buffer filled in correctly");
}
-static void drain_uring_cq(struct submitter *s, unsigned int exp_events)
+static void drain_uring_cq(struct io_uring_submit *s, unsigned int exp_events)
{
struct io_cq_ring *cring = &s->cq_ring;
unsigned int head = *cring->head;
@@ -175,12 +92,10 @@ static void drain_uring_cq(struct submitter *s, unsigned int exp_events)
events, exp_events);
}
-static int submit_to_uring_sq(struct submitter *s, struct tcase *tc)
+static int submit_to_uring_sq(struct io_uring_submit *s, struct tcase *tc)
{
- unsigned int index = 0, tail = 0, next_tail = 0;
- struct io_sq_ring *sring = &s->sq_ring;
- struct io_uring_sqe *sqe;
int ret;
+ int fd;
memset(iov->iov_base, 0, iov->iov_len);
@@ -193,26 +108,13 @@ static int submit_to_uring_sq(struct submitter *s, struct tcase *tc)
return 1;
}
- int fd = SAFE_OPEN(TEST_FILE, O_RDONLY);
-
- /* Submission queue entry addition to SQE ring buffer tail */
- tail = *sring->tail;
- next_tail = tail + 1;
- index = tail & *s->sq_ring.ring_mask;
- sqe = &s->sqes[index];
- sqe->flags = 0;
- sqe->fd = fd;
- sqe->opcode = tc->enter_flags;
- sqe->addr = (unsigned long)iov->iov_base;
- sqe->len = BLOCK_SZ;
- sqe->off = 0;
- sqe->user_data = (unsigned long long)iov;
- sring->array[index] = index;
- tail = next_tail;
-
- /* Kernel to notice the tail update */
- if (*sring->tail != tail)
- *sring->tail = tail;
+ fd = SAFE_OPEN(TEST_FILE, O_RDONLY);
+
+ /* Submit SQE using common helper */
+ io_uring_submit_sqe_internal(s, fd, tc->enter_flags,
+ (unsigned long)iov->iov_base,
+ BLOCK_SZ, 0,
+ (unsigned long long)iov);
ret = io_uring_enter(s->ring_fd, 1, 1, IORING_ENTER_GETEVENTS, &sig);
if (ret == 1) {
@@ -229,23 +131,20 @@ static int submit_to_uring_sq(struct submitter *s, struct tcase *tc)
static void cleanup_io_uring_test(void)
{
- io_uring_register(s->ring_fd, IORING_UNREGISTER_BUFFERS,
+ io_uring_register(s.ring_fd, IORING_UNREGISTER_BUFFERS,
NULL, QUEUE_DEPTH);
- SAFE_MUNMAP(s->sqes, sizeof(struct io_uring_sqe));
- SAFE_MUNMAP(cptr, cptr_size);
- SAFE_MUNMAP(sptr, sptr_size);
- SAFE_CLOSE(s->ring_fd);
+ io_uring_cleanup_queue(&s, QUEUE_DEPTH);
}
static void run(unsigned int n)
{
struct tcase *tc = &tcases[n];
- if (setup_io_uring_test(s, tc))
+ if (setup_io_uring_test(&s, tc))
return;
- if (!submit_to_uring_sq(s, tc))
- drain_uring_cq(s, 1);
+ if (!submit_to_uring_sq(&s, tc))
+ drain_uring_cq(&s, 1);
cleanup_io_uring_test();
}
--
2.39.1
--
Mailing list info: https://lists.linux.it/listinfo/ltp
^ permalink raw reply related [flat|nested] 4+ messages in thread