From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
To: qemu-block@nongnu.org
Cc: qemu-devel@nongnu.org, mreitz@redhat.com, kwolf@redhat.com,
vsementsov@virtuozzo.com, den@openvz.org
Subject: [PATCH v2 1/3] qemu-io: add aio_discard
Date: Thu, 25 Feb 2021 14:52:03 +0300
Message-ID: <20210225115205.249923-2-vsementsov@virtuozzo.com>
In-Reply-To: <20210225115205.249923-1-vsementsov@virtuozzo.com>
Add an aio_discard command, mirroring the existing aio_write. It will
be used by the qcow2-discard-during-rewrite iotest added later in this
series.
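For example, the following discards the first 64 KiB of an image and
then waits for the in-flight request with aio_flush (test.img stands in
for any writable test image):

  $ qemu-io -c 'aio_discard 0 64k' -c aio_flush test.img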
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
---
 qemu-io-cmds.c | 118 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 118 insertions(+)

diff --git a/qemu-io-cmds.c b/qemu-io-cmds.c
index 97611969cb..28b5c3c092 100644
--- a/qemu-io-cmds.c
+++ b/qemu-io-cmds.c
@@ -1332,6 +1332,7 @@ struct aio_ctx {
     BlockBackend *blk;
     QEMUIOVector qiov;
     int64_t offset;
+    int64_t discard_bytes;
     char *buf;
     bool qflag;
     bool vflag;
@@ -1343,6 +1344,33 @@ struct aio_ctx {
     struct timespec t1;
 };
 
+static void aio_discard_done(void *opaque, int ret)
+{
+    struct aio_ctx *ctx = opaque;
+    struct timespec t2;
+
+    clock_gettime(CLOCK_MONOTONIC, &t2);
+
+    if (ret < 0) {
+        printf("aio_discard failed: %s\n", strerror(-ret));
+        block_acct_failed(blk_get_stats(ctx->blk), &ctx->acct);
+        goto out;
+    }
+
+    block_acct_done(blk_get_stats(ctx->blk), &ctx->acct);
+
+    if (ctx->qflag) {
+        goto out;
+    }
+
+    /* Finally, report back -- -C gives a parsable format */
+    t2 = tsub(t2, ctx->t1);
+    print_report("discarded", &t2, ctx->offset, ctx->discard_bytes,
+                 ctx->discard_bytes, 1, ctx->Cflag);
+out:
+    g_free(ctx);
+}
+
 static void aio_write_done(void *opaque, int ret)
 {
     struct aio_ctx *ctx = opaque;
@@ -1671,6 +1699,95 @@ static int aio_write_f(BlockBackend *blk, int argc, char **argv)
     return 0;
 }
 
+static void aio_discard_help(void)
+{
+    printf(
+"\n"
+" asynchronously discards a range of bytes from the given offset\n"
+"\n"
+" Example:\n"
+" 'aio_discard 0 64k' - discards 64 KiB at the start of the disk\n"
+"\n"
+" Note that due to its asynchronous nature, this command will be\n"
+" considered successful once the request is submitted, independently\n"
+" of potential I/O errors.\n"
+" -C, -- report statistics in a machine parsable format\n"
+" -i, -- treat request as invalid, for exercising stats\n"
+" -q, -- quiet mode, do not show I/O statistics\n"
+"\n");
+}
+
+static int aio_discard_f(BlockBackend *blk, int argc, char **argv);
+
+static const cmdinfo_t aio_discard_cmd = {
+    .name = "aio_discard",
+    .cfunc = aio_discard_f,
+    .perm = BLK_PERM_WRITE,
+    .argmin = 2,
+    .argmax = -1,
+    .args = "[-Ciq] off len",
+    .oneline = "asynchronously discards a number of bytes",
+    .help = aio_discard_help,
+};
+
+static int aio_discard_f(BlockBackend *blk, int argc, char **argv)
+{
+    int ret;
+    int c;
+    struct aio_ctx *ctx = g_new0(struct aio_ctx, 1);
+
+    ctx->blk = blk;
+    while ((c = getopt(argc, argv, "Ciq")) != -1) {
+        switch (c) {
+        case 'C':
+            ctx->Cflag = true;
+            break;
+        case 'q':
+            ctx->qflag = true;
+            break;
+        case 'i':
+            printf("injecting invalid discard request\n");
+            block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_UNMAP);
+            g_free(ctx);
+            return 0;
+        default:
+            g_free(ctx);
+            qemuio_command_usage(&aio_discard_cmd);
+            return -EINVAL;
+        }
+    }
+
+    if (optind != argc - 2) {
+        g_free(ctx);
+        qemuio_command_usage(&aio_discard_cmd);
+        return -EINVAL;
+    }
+
+    ctx->offset = cvtnum(argv[optind]);
+    if (ctx->offset < 0) {
+        ret = ctx->offset;
+        print_cvtnum_err(ret, argv[optind]);
+        g_free(ctx);
+        return ret;
+    }
+    optind++;
+
+    ctx->discard_bytes = cvtnum(argv[optind]);
+    if (ctx->discard_bytes < 0) {
+        ret = ctx->discard_bytes;
+        print_cvtnum_err(ret, argv[optind]);
+        g_free(ctx);
+        return ret;
+    }
+
+    block_acct_start(blk_get_stats(blk), &ctx->acct, ctx->discard_bytes,
+                     BLOCK_ACCT_UNMAP);
+    blk_aio_pdiscard(blk, ctx->offset, ctx->discard_bytes,
+                     aio_discard_done, ctx);
+
+    return 0;
+}
+
 static int aio_flush_f(BlockBackend *blk, int argc, char **argv)
 {
     BlockAcctCookie cookie;
@@ -2494,6 +2611,7 @@ static void __attribute((constructor)) init_qemuio_commands(void)
     qemuio_add_command(&readv_cmd);
     qemuio_add_command(&write_cmd);
     qemuio_add_command(&writev_cmd);
+    qemuio_add_command(&aio_discard_cmd);
     qemuio_add_command(&aio_read_cmd);
     qemuio_add_command(&aio_write_cmd);
     qemuio_add_command(&aio_flush_cmd);
--
2.29.2