From: Cyril Hrubis <chrubis@suse.cz>
To: Sachin Sant <sachinp@linux.ibm.com>
Cc: ltp@lists.linux.it
Subject: Re: [LTP] [PATCH v2 2/3] io_uring: Test READV and WRITEV operations
Date: Wed, 15 Apr 2026 11:59:16 +0200 [thread overview]
Message-ID: <ad9hdOxg9CYxgNBb@yuki.lan> (raw)
In-Reply-To: <20260324052241.21455-3-sachinp@linux.ibm.com>
Hi!
> +#include "io_uring_common.h"
> +
> +#define TEST_FILE "io_uring_test_file"
> +#define QUEUE_DEPTH 2
> +#define NUM_VECS 4
> +#define VEC_SIZE 1024
> +#define VAR_BUF1_SIZE 512
> +#define VAR_BUF2_SIZE 1024
> +#define VAR_BUF3_SIZE 256
> +
> +static struct iovec *write_iovs, *read_iovs;
> +static struct iovec *var_write_iovs, *var_read_iovs;
> +static struct io_uring_submit s;
> +static sigset_t sig;
> +
> +static void prepare_write_buffers(void)
> +{
> + size_t i, j;
> + char *buf;
> +
> + for (i = 0; i < NUM_VECS; i++) {
> + buf = (char *)write_iovs[i].iov_base;
> + for (j = 0; j < write_iovs[i].iov_len; j++) {
> + /* Each vector has a different pattern */
> + buf[j] = 'A' + i + (j % 26);
> + }
> + }
> +}
> +
> +static void prepare_read_buffers(void)
> +{
> + size_t i;
> +
> + for (i = 0; i < NUM_VECS; i++)
> + memset(read_iovs[i].iov_base, 0, read_iovs[i].iov_len);
> +}
> +
> +static void clear_iovec_buffers(struct iovec *iovs, int nvecs)
> +{
> + int i;
> +
> + for (i = 0; i < nvecs; i++)
> + memset(iovs[i].iov_base, 0, iovs[i].iov_len);
> +}
These two functions are nearly identical; we really need only one
function to clear the buffers.
> +static void verify_iovec_data(struct iovec *write_iovs, struct iovec *read_iovs,
> + int nvecs, const char *test_name)
> +{
> + int i;
> + size_t j;
> +
> + for (i = 0; i < nvecs; i++) {
> + if (write_iovs[i].iov_len != read_iovs[i].iov_len) {
> + tst_res(TFAIL, "%s: iovec %d length mismatch: write=%zu read=%zu",
> + test_name, i, write_iovs[i].iov_len, read_iovs[i].iov_len);
> + return;
> + }
> +
> + if (memcmp(write_iovs[i].iov_base, read_iovs[i].iov_base,
> + write_iovs[i].iov_len) != 0) {
> + tst_res(TFAIL, "%s: data mismatch in vector %d", test_name, i);
> + for (j = 0; j < write_iovs[i].iov_len && j < 64; j++) {
> + char *wbuf = (char *)write_iovs[i].iov_base;
> + char *rbuf = (char *)read_iovs[i].iov_base;
> + if (wbuf[j] != rbuf[j]) {
> + tst_res(TINFO, "Vector %d: first mismatch at "
> + "offset %zu: wrote 0x%02x, read 0x%02x",
> + i, j, wbuf[j], rbuf[j]);
> + break;
> + }
> + }
> + return;
> + }
> + }
> +
> + tst_res(TPASS, "%s: data integrity verified across %d vectors",
> + test_name, nvecs);
> +}
> +
> +
> +static void test_writev_readv(void)
> +{
> + int fd;
> + int total_size = NUM_VECS * VEC_SIZE;
> +
> + tst_res(TINFO, "Testing IORING_OP_WRITEV and IORING_OP_READV");
> +
> + fd = SAFE_OPEN(TEST_FILE, O_RDWR | O_CREAT | O_TRUNC, 0644);
> +
> + tst_res(TINFO, "Writing %d bytes using %d vectors", total_size, NUM_VECS);
> + io_uring_do_vec_io_op(&s, fd, IORING_OP_WRITEV, write_iovs, NUM_VECS,
> + 0, total_size, &sig);
> +
> + SAFE_FSYNC(fd);
> +
> + tst_res(TINFO, "Reading %d bytes using %d vectors", total_size, NUM_VECS);
> + io_uring_do_vec_io_op(&s, fd, IORING_OP_READV, read_iovs, NUM_VECS,
> + 0, total_size, &sig);
> +
> + verify_iovec_data(write_iovs, read_iovs, NUM_VECS, "Basic vectored I/O");
> +
> + SAFE_CLOSE(fd);
> +}
> +
> +static void test_partial_vectors(void)
> +{
> + int fd;
> + int half_size = 2 * VEC_SIZE;
> +
> + tst_res(TINFO, "Testing partial vector operations");
> +
> + fd = SAFE_OPEN(TEST_FILE, O_RDWR | O_CREAT | O_TRUNC, 0644);
> +
> + /* Write first half using first 2 vectors at offset 0 */
> + io_uring_do_vec_io_op(&s, fd, IORING_OP_WRITEV, write_iovs, 2, 0,
> + half_size, &sig);
> +
> + /* Write second half using next 2 vectors at offset half_size */
> + io_uring_do_vec_io_op(&s, fd, IORING_OP_WRITEV, &write_iovs[2], 2,
> + half_size, half_size, &sig);
> +
> + SAFE_FSYNC(fd);
> +
> + /* Read back entire file using all 4 vectors */
> + io_uring_do_vec_io_op(&s, fd, IORING_OP_READV, read_iovs, NUM_VECS, 0,
> + NUM_VECS * VEC_SIZE, &sig);
> +
> + verify_iovec_data(write_iovs, read_iovs, NUM_VECS, "Partial vector I/O");
> +
> + SAFE_CLOSE(fd);
> +}
> +
> +static void test_varying_sizes(void)
> +{
> + int fd;
> + int expected_size = VAR_BUF1_SIZE + VAR_BUF2_SIZE + VAR_BUF3_SIZE;
> +
> + tst_res(TINFO, "Testing vectors with varying sizes including zero-length buffer");
> +
> + io_uring_init_buffer_pattern(var_write_iovs[0].iov_base, VAR_BUF1_SIZE, 'X');
> + io_uring_init_buffer_pattern(var_write_iovs[2].iov_base, VAR_BUF2_SIZE, 'Y');
> + io_uring_init_buffer_pattern(var_write_iovs[3].iov_base, VAR_BUF3_SIZE, 'Z');
Can we handle this with prepare_write_buffers() as well?
> + clear_iovec_buffers(var_read_iovs, 4);
> +
> + fd = SAFE_OPEN(TEST_FILE, O_RDWR | O_CREAT | O_TRUNC, 0644);
> +
> + io_uring_do_vec_io_op(&s, fd, IORING_OP_WRITEV, var_write_iovs, 4, 0,
> + expected_size, &sig);
> +
> + SAFE_FSYNC(fd);
> +
> + io_uring_do_vec_io_op(&s, fd, IORING_OP_READV, var_read_iovs, 4, 0,
> + expected_size, &sig);
> +
> + verify_iovec_data(var_write_iovs, var_read_iovs, 4, "Varying size vector I/O with zero-length buffer");
> +
> + SAFE_CLOSE(fd);
> +}
> +
> +static void run(void)
> +{
> + test_writev_readv();
> + test_partial_vectors();
> + test_varying_sizes();
> +}
> +
> +static void setup(void)
> +{
> + io_uring_setup_supported_by_kernel();
> + sigemptyset(&sig);
> + memset(&s, 0, sizeof(s));
> + io_uring_setup_queue(&s, QUEUE_DEPTH, 0);
> + prepare_write_buffers();
> + prepare_read_buffers();
> +}
> +
> +static void cleanup(void)
> +{
> + io_uring_cleanup_queue(&s, QUEUE_DEPTH);
> +}
> +
> +static struct tst_test test = {
> + .test_all = run,
> + .setup = setup,
> + .cleanup = cleanup,
> + .needs_tmpdir = 1,
> + .bufs = (struct tst_buffers []) {
> + {&write_iovs, .iov_sizes = (int[]){VEC_SIZE, VEC_SIZE, VEC_SIZE, VEC_SIZE, -1}},
> + {&read_iovs, .iov_sizes = (int[]){VEC_SIZE, VEC_SIZE, VEC_SIZE, VEC_SIZE, -1}},
> + {&var_write_iovs, .iov_sizes = (int[]){VAR_BUF1_SIZE, 0, VAR_BUF2_SIZE, VAR_BUF3_SIZE, -1}},
> + {&var_read_iovs, .iov_sizes = (int[]){VAR_BUF1_SIZE, 0, VAR_BUF2_SIZE, VAR_BUF3_SIZE, -1}},
> + {}
> + },
> + .save_restore = (const struct tst_path_val[]) {
> + {"/proc/sys/kernel/io_uring_disabled", "0",
> + TST_SR_SKIP_MISSING | TST_SR_TCONF_RO},
> + {}
> + }
> +};
> --
> 2.39.1
>
--
Cyril Hrubis
chrubis@suse.cz
--
Mailing list info: https://lists.linux.it/listinfo/ltp
next prev parent reply other threads:[~2026-04-15 9:59 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-24 5:22 [LTP] [PATCH v2 0/3] io_uring READ(V), WRITE(v) operation tests Sachin Sant
2026-03-24 5:22 ` [LTP] [PATCH v2 1/3] io_uring: Test IORING READ and WRITE operations Sachin Sant
2026-04-16 8:55 ` Martin Doucha
2026-04-16 9:36 ` Sachin Sant
2026-03-24 5:22 ` [LTP] [PATCH v2 2/3] io_uring: Test READV and WRITEV operations Sachin Sant
2026-04-15 9:59 ` Cyril Hrubis [this message]
2026-04-15 13:56 ` Sachin Sant
2026-03-24 5:22 ` [LTP] [PATCH v2 3/3] io_uring: Refactor io_uring01 to use common code Sachin Sant
2026-04-15 9:35 ` [LTP] [PATCH v2 0/3] io_uring READ(V), WRITE(v) operation tests Cyril Hrubis
2026-04-15 13:58 ` Sachin Sant
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=ad9hdOxg9CYxgNBb@yuki.lan \
--to=chrubis@suse.cz \
--cc=ltp@lists.linux.it \
--cc=sachinp@linux.ibm.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox