From: Paolo Bonzini <pbonzini@redhat.com>
To: Eric Blake <eblake@redhat.com>, qemu-devel@nongnu.org
Cc: kwolf@redhat.com, famz@redhat.com, qemu-block@nongnu.org,
stefanha@redhat.com, Max Reitz <mreitz@redhat.com>
Subject: Re: [Qemu-devel] [PATCH v3 4/6] nbd: Rely on block layer to break up large requests
Date: Mon, 18 Jul 2016 10:16:11 +0200
Message-ID: <5bb015ef-6422-d1be-94a4-f63f1e9f95ed@redhat.com>
In-Reply-To: <1468607524-19021-5-git-send-email-eblake@redhat.com>
On 15/07/2016 20:32, Eric Blake wrote:
> Now that the block layer will honor max_transfer, we can simplify
> our code to rely on that guarantee.
>
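For readers following the series: the guarantee comes from the fragmentation
loop that patches 1/6 and 3/6 add to the block layer.  A minimal sketch of the
idea in a hypothetical fragment_pwritev() -- not the literal block/io.c code,
helper names as I recall them, error and alignment handling trimmed:

    /* Split one large write into driver-sized pieces, honoring
     * bs->bl.max_transfer.  Purely illustrative. */
    static int coroutine_fn fragment_pwritev(BlockDriverState *bs,
                                             int64_t offset,
                                             unsigned int bytes,
                                             QEMUIOVector *qiov)
    {
        uint32_t max_transfer = bs->bl.max_transfer ? bs->bl.max_transfer
                                                    : INT_MAX;
        unsigned int done = 0;

        while (done < bytes) {
            unsigned int num = MIN(bytes - done, max_transfer);
            QEMUIOVector local;
            int ret;

            /* Hand the driver only the sub-range it can take at once. */
            qemu_iovec_init(&local, qiov->niov);
            qemu_iovec_concat(&local, qiov, done, num);
            ret = bdrv_driver_pwritev(bs, offset + done, num, &local, 0);
            qemu_iovec_destroy(&local);
            if (ret < 0) {
                return ret;
            }
            done += num;
        }
        return 0;
    }

Once the driver advertises its limit, NBD never sees a request larger than
NBD_MAX_SECTORS, which is exactly what the new asserts below rely on.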
> The readv code can call directly into nbd-client, just as the
> writev code has done since commit 52a4650.
>
> Interestingly enough, while qemu-io 'w 0 40m' splits into a 32M and
> an 8M transaction, 'w -z 0 40m' splits into two 16M transactions and
> an 8M one,
> because the block layer caps the bounce buffer for writing zeroes
> at 16M. When we later introduce support for NBD_CMD_WRITE_ZEROES,
> we can get a full 32M zero write (or larger, if the client and
> server negotiate that write zeroes can use a larger size than
> ordinary writes).
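The split sizes in that example follow from two caps; a back-of-the-envelope
sketch (the constants are from memory, so treat the exact values and macro
names as approximate):

    /* Illustrative only, not actual QEMU code. */
    #define MAX_REQUEST     (32 * 1024 * 1024) /* NBD per-request cap
                                                * (NBD_MAX_BUFFER_SIZE) */
    #define MAX_ZERO_BOUNCE (16 * 1024 * 1024) /* cap on the write-zeroes
                                                * bounce buffer */

    /* 40M plain write: capped per request at 32M  -> 32M + 8M        */
    /* 40M zero write:  bounced through 16M buffer -> 16M + 16M + 8M  */

A native NBD_CMD_WRITE_ZEROES would skip the bounce buffer entirely, which is
why the full 32M (or larger) zero write becomes possible later.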
>
> Signed-off-by: Eric Blake <eblake@redhat.com>
> Reviewed-by: Fam Zheng <famz@redhat.com>
> Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
> ---
> block/nbd-client.c | 51 ++++++++-------------------------------------------
> block/nbd.c | 12 +++---------
> 2 files changed, 11 insertions(+), 52 deletions(-)
>
> diff --git a/block/nbd-client.c b/block/nbd-client.c
> index 4cc408d..f1fb58b 100644
> --- a/block/nbd-client.c
> +++ b/block/nbd-client.c
> @@ -217,15 +217,15 @@ static void nbd_coroutine_end(NbdClientSession *s,
> }
> }
>
> -static int nbd_co_readv_1(BlockDriverState *bs, int64_t sector_num,
> - int nb_sectors, QEMUIOVector *qiov,
> - int offset)
> +int nbd_client_co_readv(BlockDriverState *bs, int64_t sector_num,
> + int nb_sectors, QEMUIOVector *qiov)
> {
> NbdClientSession *client = nbd_get_client_session(bs);
> struct nbd_request request = { .type = NBD_CMD_READ };
> struct nbd_reply reply;
> ssize_t ret;
>
> + assert(nb_sectors <= NBD_MAX_SECTORS);
> request.from = sector_num * 512;
> request.len = nb_sectors * 512;
>
> @@ -234,16 +234,15 @@ static int nbd_co_readv_1(BlockDriverState *bs, int64_t sector_num,
> if (ret < 0) {
> reply.error = -ret;
> } else {
> - nbd_co_receive_reply(client, &request, &reply, qiov, offset);
> + nbd_co_receive_reply(client, &request, &reply, qiov, 0);
> }
> nbd_coroutine_end(client, &request);
> return -reply.error;
>
> }
>
> -static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num,
> - int nb_sectors, QEMUIOVector *qiov,
> - int offset, int flags)
> +int nbd_client_co_writev(BlockDriverState *bs, int64_t sector_num,
> + int nb_sectors, QEMUIOVector *qiov, int flags)
> {
> NbdClientSession *client = nbd_get_client_session(bs);
> struct nbd_request request = { .type = NBD_CMD_WRITE };
> @@ -255,11 +254,12 @@ static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num,
> request.type |= NBD_CMD_FLAG_FUA;
> }
>
> + assert(nb_sectors <= NBD_MAX_SECTORS);
> request.from = sector_num * 512;
> request.len = nb_sectors * 512;
>
> nbd_coroutine_start(client, &request);
> - ret = nbd_co_send_request(bs, &request, qiov, offset);
> + ret = nbd_co_send_request(bs, &request, qiov, 0);
> if (ret < 0) {
> reply.error = -ret;
> } else {
> @@ -269,41 +269,6 @@ static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num,
> return -reply.error;
> }
>
> -int nbd_client_co_readv(BlockDriverState *bs, int64_t sector_num,
> - int nb_sectors, QEMUIOVector *qiov)
> -{
> - int offset = 0;
> - int ret;
> - while (nb_sectors > NBD_MAX_SECTORS) {
> - ret = nbd_co_readv_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset);
> - if (ret < 0) {
> - return ret;
> - }
> - offset += NBD_MAX_SECTORS * 512;
> - sector_num += NBD_MAX_SECTORS;
> - nb_sectors -= NBD_MAX_SECTORS;
> - }
> - return nbd_co_readv_1(bs, sector_num, nb_sectors, qiov, offset);
> -}
> -
> -int nbd_client_co_writev(BlockDriverState *bs, int64_t sector_num,
> - int nb_sectors, QEMUIOVector *qiov, int flags)
> -{
> - int offset = 0;
> - int ret;
> - while (nb_sectors > NBD_MAX_SECTORS) {
> - ret = nbd_co_writev_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset,
> - flags);
> - if (ret < 0) {
> - return ret;
> - }
> - offset += NBD_MAX_SECTORS * 512;
> - sector_num += NBD_MAX_SECTORS;
> - nb_sectors -= NBD_MAX_SECTORS;
> - }
> - return nbd_co_writev_1(bs, sector_num, nb_sectors, qiov, offset, flags);
> -}
> -
> int nbd_client_co_flush(BlockDriverState *bs)
> {
> NbdClientSession *client = nbd_get_client_session(bs);
> diff --git a/block/nbd.c b/block/nbd.c
> index 08e5b67..8a13078 100644
> --- a/block/nbd.c
> +++ b/block/nbd.c
> @@ -349,12 +349,6 @@ static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
> return ret;
> }
>
> -static int nbd_co_readv(BlockDriverState *bs, int64_t sector_num,
> - int nb_sectors, QEMUIOVector *qiov)
> -{
> - return nbd_client_co_readv(bs, sector_num, nb_sectors, qiov);
> -}
> -
> static int nbd_co_flush(BlockDriverState *bs)
> {
> return nbd_client_co_flush(bs);
> @@ -450,7 +444,7 @@ static BlockDriver bdrv_nbd = {
> .instance_size = sizeof(BDRVNBDState),
> .bdrv_parse_filename = nbd_parse_filename,
> .bdrv_file_open = nbd_open,
> - .bdrv_co_readv = nbd_co_readv,
> + .bdrv_co_readv = nbd_client_co_readv,
> .bdrv_co_writev_flags = nbd_client_co_writev,
> .bdrv_close = nbd_close,
> .bdrv_co_flush_to_os = nbd_co_flush,
> @@ -468,7 +462,7 @@ static BlockDriver bdrv_nbd_tcp = {
> .instance_size = sizeof(BDRVNBDState),
> .bdrv_parse_filename = nbd_parse_filename,
> .bdrv_file_open = nbd_open,
> - .bdrv_co_readv = nbd_co_readv,
> + .bdrv_co_readv = nbd_client_co_readv,
> .bdrv_co_writev_flags = nbd_client_co_writev,
> .bdrv_close = nbd_close,
> .bdrv_co_flush_to_os = nbd_co_flush,
> @@ -486,7 +480,7 @@ static BlockDriver bdrv_nbd_unix = {
> .instance_size = sizeof(BDRVNBDState),
> .bdrv_parse_filename = nbd_parse_filename,
> .bdrv_file_open = nbd_open,
> - .bdrv_co_readv = nbd_co_readv,
> + .bdrv_co_readv = nbd_client_co_readv,
> .bdrv_co_writev_flags = nbd_client_co_writev,
> .bdrv_close = nbd_close,
> .bdrv_co_flush_to_os = nbd_co_flush,
>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Thread overview:
2016-07-15 18:31 [Qemu-devel] [PATCH v3 0/6] Auto-fragment large transactions at the block layer Eric Blake
2016-07-15 18:31 ` [Qemu-devel] [PATCH v3 1/6] block: Fragment reads to max transfer length Eric Blake
2016-07-15 18:32 ` [Qemu-devel] [PATCH v3 2/6] raw_bsd: Don't advertise flags not supported by protocol layer Eric Blake
2016-07-15 18:32 ` [Qemu-devel] [PATCH v3 3/6] block: Fragment writes to max transfer length Eric Blake
2016-07-15 18:32 ` [Qemu-devel] [PATCH v3 4/6] nbd: Rely on block layer to break up large requests Eric Blake
2016-07-18 8:16 ` Paolo Bonzini [this message]
2016-07-15 18:32 ` [Qemu-devel] [PATCH v3 5/6] nbd: Drop unused offset parameter Eric Blake
2016-07-18 8:16 ` Paolo Bonzini
2016-07-15 18:32 ` [Qemu-devel] [PATCH v3 6/6] iscsi: Rely on block layer to break up large requests Eric Blake
2016-07-18 8:16 ` Paolo Bonzini
2016-07-19 16:12 ` [Qemu-devel] [PATCH v3 0/6] Auto-fragment large transactions at the block layer Stefan Hajnoczi