From: Eric Blake <eblake@redhat.com>
To: qemu-devel@nongnu.org
Cc: qemu-block@nongnu.org, kwolf@redhat.com,
Paolo Bonzini <pbonzini@redhat.com>,
Max Reitz <mreitz@redhat.com>
Subject: [Qemu-devel] [PATCH 4/5] nbd: Rely on block layer to break up large requests
Date: Mon, 20 Jun 2016 17:39:28 -0600
Message-ID: <1466465969-25315-5-git-send-email-eblake@redhat.com>
In-Reply-To: <1466465969-25315-1-git-send-email-eblake@redhat.com>
Now that the block layer will honor max_transfer, we can simplify
our code to rely on that guarantee.
The readv code can call directly into nbd-client, just as the
writev code has done since commit 52a4650.
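
As an illustration only (not part of this patch): below is roughly the
shape of the fragmentation loop that now lives once in the generic block
layer instead of in each driver.  do_one_request() and
max_transfer_sectors are hypothetical stand-ins for this sketch, not
actual QEMU interfaces.

    /* Sketch only, not QEMU code: how a generic layer can fragment a
     * large request into chunks no bigger than the driver's max
     * transfer size, which is what lets the driver then assume
     * nb_sectors <= NBD_MAX_SECTORS. */
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the driver callback
     * (e.g. one NBD_CMD_READ on the wire). */
    static int do_one_request(int64_t sector_num, int nb_sectors)
    {
        printf("request at sector %lld, %d sectors\n",
               (long long)sector_num, nb_sectors);
        return 0;
    }

    static int fragment_and_submit(int64_t sector_num, int nb_sectors,
                                   int max_transfer_sectors)
    {
        while (nb_sectors > 0) {
            int num = nb_sectors < max_transfer_sectors
                      ? nb_sectors : max_transfer_sectors;
            int ret = do_one_request(sector_num, num);
            if (ret < 0) {
                return ret;
            }
            sector_num += num;
            nb_sectors -= num;
        }
        return 0;
    }

    int main(void)
    {
        /* 40M in 512-byte sectors, capped at 32M per request: 32M + 8M. */
        return fragment_and_submit(0, 40 * 2048, 32 * 2048);
    }

The per-driver loops deleted below were doing exactly this kind of
splitting by hand.
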
Interestingly enough, while qemu-io 'w 0 40m' splits into a 32M and an
8M transaction, 'w -z 0 40m' splits into two 16M transactions and an 8M
one, because the block layer caps the bounce buffer for writing zeroes
at 16M. When we later introduce support for NBD_CMD_WRITE_ZEROES, we
can get a full 32M zero write (or larger, if the client and server
negotiate that write zeroes can use a larger size than ordinary
writes).
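
Purely as an illustration (not code from this series), the split sizes
quoted above can be reproduced with a tiny standalone program:

    /* Illustration only: fragment sizes for a 40M request.
     * 32M cap (ordinary writes):            32M 8M
     * 16M cap (write-zeroes bounce buffer): 16M 16M 8M */
    #include <stdio.h>

    static void show_split(long long len, long long cap)
    {
        while (len > 0) {
            long long num = len < cap ? len : cap;
            printf("%lldM ", num >> 20);
            len -= num;
        }
        printf("\n");
    }

    int main(void)
    {
        show_split(40LL << 20, 32LL << 20);  /* prints: 32M 8M */
        show_split(40LL << 20, 16LL << 20);  /* prints: 16M 16M 8M */
        return 0;
    }
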
Signed-off-by: Eric Blake <eblake@redhat.com>
---
block/nbd-client.c | 51 ++++++++-------------------------------------------
block/nbd.c | 12 +++---------
2 files changed, 11 insertions(+), 52 deletions(-)
diff --git a/block/nbd-client.c b/block/nbd-client.c
index 420bce8..9f023f8 100644
--- a/block/nbd-client.c
+++ b/block/nbd-client.c
@@ -217,15 +217,15 @@ static void nbd_coroutine_end(NbdClientSession *s,
}
}
-static int nbd_co_readv_1(BlockDriverState *bs, int64_t sector_num,
- int nb_sectors, QEMUIOVector *qiov,
- int offset)
+int nbd_client_co_readv(BlockDriverState *bs, int64_t sector_num,
+ int nb_sectors, QEMUIOVector *qiov)
{
NbdClientSession *client = nbd_get_client_session(bs);
struct nbd_request request = { .type = NBD_CMD_READ };
struct nbd_reply reply;
ssize_t ret;
+ assert(nb_sectors <= NBD_MAX_SECTORS);
request.from = sector_num * 512;
request.len = nb_sectors * 512;
@@ -234,16 +234,15 @@ static int nbd_co_readv_1(BlockDriverState *bs, int64_t sector_num,
if (ret < 0) {
reply.error = -ret;
} else {
- nbd_co_receive_reply(client, &request, &reply, qiov, offset);
+ nbd_co_receive_reply(client, &request, &reply, qiov, 0);
}
nbd_coroutine_end(client, &request);
return -reply.error;
}
-static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num,
- int nb_sectors, QEMUIOVector *qiov,
- int offset, int flags)
+int nbd_client_co_writev(BlockDriverState *bs, int64_t sector_num,
+ int nb_sectors, QEMUIOVector *qiov, int flags)
{
NbdClientSession *client = nbd_get_client_session(bs);
struct nbd_request request = { .type = NBD_CMD_WRITE };
@@ -255,11 +254,12 @@ static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num,
request.type |= NBD_CMD_FLAG_FUA;
}
+ assert(nb_sectors <= NBD_MAX_SECTORS);
request.from = sector_num * 512;
request.len = nb_sectors * 512;
nbd_coroutine_start(client, &request);
- ret = nbd_co_send_request(bs, &request, qiov, offset);
+ ret = nbd_co_send_request(bs, &request, qiov, 0);
if (ret < 0) {
reply.error = -ret;
} else {
@@ -269,41 +269,6 @@ static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num,
return -reply.error;
}
-int nbd_client_co_readv(BlockDriverState *bs, int64_t sector_num,
- int nb_sectors, QEMUIOVector *qiov)
-{
- int offset = 0;
- int ret;
- while (nb_sectors > NBD_MAX_SECTORS) {
- ret = nbd_co_readv_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset);
- if (ret < 0) {
- return ret;
- }
- offset += NBD_MAX_SECTORS * 512;
- sector_num += NBD_MAX_SECTORS;
- nb_sectors -= NBD_MAX_SECTORS;
- }
- return nbd_co_readv_1(bs, sector_num, nb_sectors, qiov, offset);
-}
-
-int nbd_client_co_writev(BlockDriverState *bs, int64_t sector_num,
- int nb_sectors, QEMUIOVector *qiov, int flags)
-{
- int offset = 0;
- int ret;
- while (nb_sectors > NBD_MAX_SECTORS) {
- ret = nbd_co_writev_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset,
- flags);
- if (ret < 0) {
- return ret;
- }
- offset += NBD_MAX_SECTORS * 512;
- sector_num += NBD_MAX_SECTORS;
- nb_sectors -= NBD_MAX_SECTORS;
- }
- return nbd_co_writev_1(bs, sector_num, nb_sectors, qiov, offset, flags);
-}
-
int nbd_client_co_flush(BlockDriverState *bs)
{
NbdClientSession *client = nbd_get_client_session(bs);
diff --git a/block/nbd.c b/block/nbd.c
index 08e5b67..8a13078 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -349,12 +349,6 @@ static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
return ret;
}
-static int nbd_co_readv(BlockDriverState *bs, int64_t sector_num,
- int nb_sectors, QEMUIOVector *qiov)
-{
- return nbd_client_co_readv(bs, sector_num, nb_sectors, qiov);
-}
-
static int nbd_co_flush(BlockDriverState *bs)
{
return nbd_client_co_flush(bs);
@@ -450,7 +444,7 @@ static BlockDriver bdrv_nbd = {
.instance_size = sizeof(BDRVNBDState),
.bdrv_parse_filename = nbd_parse_filename,
.bdrv_file_open = nbd_open,
- .bdrv_co_readv = nbd_co_readv,
+ .bdrv_co_readv = nbd_client_co_readv,
.bdrv_co_writev_flags = nbd_client_co_writev,
.bdrv_close = nbd_close,
.bdrv_co_flush_to_os = nbd_co_flush,
@@ -468,7 +462,7 @@ static BlockDriver bdrv_nbd_tcp = {
.instance_size = sizeof(BDRVNBDState),
.bdrv_parse_filename = nbd_parse_filename,
.bdrv_file_open = nbd_open,
- .bdrv_co_readv = nbd_co_readv,
+ .bdrv_co_readv = nbd_client_co_readv,
.bdrv_co_writev_flags = nbd_client_co_writev,
.bdrv_close = nbd_close,
.bdrv_co_flush_to_os = nbd_co_flush,
@@ -486,7 +480,7 @@ static BlockDriver bdrv_nbd_unix = {
.instance_size = sizeof(BDRVNBDState),
.bdrv_parse_filename = nbd_parse_filename,
.bdrv_file_open = nbd_open,
- .bdrv_co_readv = nbd_co_readv,
+ .bdrv_co_readv = nbd_client_co_readv,
.bdrv_co_writev_flags = nbd_client_co_writev,
.bdrv_close = nbd_close,
.bdrv_co_flush_to_os = nbd_co_flush,
--
2.5.5