From: Jeff Cody <jcody@redhat.com>
To: Eric Blake <eblake@redhat.com>
Cc: qemu-devel@nongnu.org, kwolf@redhat.com, qemu-block@nongnu.org,
jsnow@redhat.com, Max Reitz <mreitz@redhat.com>
Subject: Re: [Qemu-devel] [PATCH v3 12/20] mirror: Switch mirror_iteration() to byte-based
Date: Fri, 30 Jun 2017 17:14:58 -0400
Message-ID: <20170630211458.GL4997@localhost.localdomain>
In-Reply-To: <20170627192458.15519-13-eblake@redhat.com>
On Tue, Jun 27, 2017 at 02:24:50PM -0500, Eric Blake wrote:
> We are gradually converting to byte-based interfaces, as they are
> easier to reason about than sector-based. Change the internal
> loop iteration of mirroring to track by bytes instead of sectors
> (although we are still guaranteed that we iterate by steps that
> are both sector-aligned and multiples of the granularity). Drop
> the now-unused mirror_clip_sectors().
>
> Signed-off-by: Eric Blake <eblake@redhat.com>
> Reviewed-by: John Snow <jsnow@redhat.com>
>
Reviewed-by: Jeff Cody <jcody@redhat.com>
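
As an aside for anyone following the series: the core of what the commit
message describes is simply "advance a byte offset in granularity-sized,
sector-aligned steps, and clip the final chunk against the device length
in bytes".  Below is a minimal standalone sketch of that arithmetic (not
QEMU code; SECTOR_SIZE, GRANULARITY, BDEV_LENGTH and clip_bytes() are
made-up stand-ins for illustration, with clip_bytes() only mimicking the
shape of mirror_clip_bytes()):

    #include <assert.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SIZE   512           /* assumed stand-in for BDRV_SECTOR_SIZE */
    #define GRANULARITY   (64 * 1024)   /* assumed chunk size, multiple of SECTOR_SIZE */
    #define BDEV_LENGTH   (1000 * 1024) /* assumed device length in bytes */

    /* Mimics the shape of mirror_clip_bytes(): never run past end-of-file. */
    static int64_t clip_bytes(int64_t offset, int64_t bytes)
    {
        int64_t max = BDEV_LENGTH - offset;
        return bytes < max ? bytes : max;
    }

    int main(void)
    {
        /* Walk the device in granularity-sized, sector-aligned byte steps. */
        for (int64_t offset = 0; offset < BDEV_LENGTH; ) {
            int64_t io_bytes = clip_bytes(offset, GRANULARITY);

            /* The old code tracked sectors; byte offsets stay sector-aligned. */
            assert(offset % SECTOR_SIZE == 0);

            printf("copy %8" PRId64 " bytes at offset %8" PRId64 "\n",
                   io_bytes, offset);
            offset += io_bytes;
        }
        return 0;
    }

Since BDEV_LENGTH here is deliberately not a multiple of GRANULARITY, the
last iteration prints a short, clipped chunk -- the same end-of-file case
that mirror_clip_bytes() guards against in the patch.
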
> ---
> v3: rebase to Paolo's thread-safety changes, R-b kept
> v2: straightforward rebase to earlier mirror_clip_bytes() change, R-b kept
> ---
> block/mirror.c | 105 +++++++++++++++++++++++++--------------------------------
> 1 file changed, 46 insertions(+), 59 deletions(-)
>
> diff --git a/block/mirror.c b/block/mirror.c
> index 81ff784..0eb2af4 100644
> --- a/block/mirror.c
> +++ b/block/mirror.c
> @@ -184,15 +184,6 @@ static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
> return MIN(bytes, s->bdev_length - offset);
> }
>
> -/* Clip nb_sectors relative to sector_num to not exceed end-of-file */
> -static inline int mirror_clip_sectors(MirrorBlockJob *s,
> - int64_t sector_num,
> - int nb_sectors)
> -{
> - return MIN(nb_sectors,
> - s->bdev_length / BDRV_SECTOR_SIZE - sector_num);
> -}
> -
> /* Round offset and/or bytes to target cluster if COW is needed, and
> * return the offset of the adjusted tail against original. */
> static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
> @@ -336,30 +327,28 @@ static void mirror_do_zero_or_discard(MirrorBlockJob *s,
> static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
> {
> BlockDriverState *source = s->source;
> - int64_t sector_num, first_chunk;
> + int64_t offset, first_chunk;
> uint64_t delay_ns = 0;
> /* At least the first dirty chunk is mirrored in one iteration. */
> int nb_chunks = 1;
> - int64_t end = s->bdev_length / BDRV_SECTOR_SIZE;
> int sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
> bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
> int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);
>
> bdrv_dirty_bitmap_lock(s->dirty_bitmap);
> - sector_num = bdrv_dirty_iter_next(s->dbi);
> - if (sector_num < 0) {
> + offset = bdrv_dirty_iter_next(s->dbi) * BDRV_SECTOR_SIZE;
> + if (offset < 0) {
> bdrv_set_dirty_iter(s->dbi, 0);
> - sector_num = bdrv_dirty_iter_next(s->dbi);
> + offset = bdrv_dirty_iter_next(s->dbi) * BDRV_SECTOR_SIZE;
> trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap) *
> BDRV_SECTOR_SIZE);
> - assert(sector_num >= 0);
> + assert(offset >= 0);
> }
> bdrv_dirty_bitmap_unlock(s->dirty_bitmap);
>
> - first_chunk = sector_num / sectors_per_chunk;
> + first_chunk = offset / s->granularity;
> while (test_bit(first_chunk, s->in_flight_bitmap)) {
> - trace_mirror_yield_in_flight(s, sector_num * BDRV_SECTOR_SIZE,
> - s->in_flight);
> + trace_mirror_yield_in_flight(s, offset, s->in_flight);
> mirror_wait_for_io(s);
> }
>
> @@ -368,25 +357,26 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
> /* Find the number of consective dirty chunks following the first dirty
> * one, and wait for in flight requests in them. */
> bdrv_dirty_bitmap_lock(s->dirty_bitmap);
> - while (nb_chunks * sectors_per_chunk < (s->buf_size >> BDRV_SECTOR_BITS)) {
> + while (nb_chunks * s->granularity < s->buf_size) {
> int64_t next_dirty;
> - int64_t next_sector = sector_num + nb_chunks * sectors_per_chunk;
> - int64_t next_chunk = next_sector / sectors_per_chunk;
> - if (next_sector >= end ||
> - !bdrv_get_dirty_locked(source, s->dirty_bitmap, next_sector)) {
> + int64_t next_offset = offset + nb_chunks * s->granularity;
> + int64_t next_chunk = next_offset / s->granularity;
> + if (next_offset >= s->bdev_length ||
> + !bdrv_get_dirty_locked(source, s->dirty_bitmap,
> + next_offset >> BDRV_SECTOR_BITS)) {
> break;
> }
> if (test_bit(next_chunk, s->in_flight_bitmap)) {
> break;
> }
>
> - next_dirty = bdrv_dirty_iter_next(s->dbi);
> - if (next_dirty > next_sector || next_dirty < 0) {
> + next_dirty = bdrv_dirty_iter_next(s->dbi) * BDRV_SECTOR_SIZE;
> + if (next_dirty > next_offset || next_dirty < 0) {
> /* The bitmap iterator's cache is stale, refresh it */
> - bdrv_set_dirty_iter(s->dbi, next_sector);
> - next_dirty = bdrv_dirty_iter_next(s->dbi);
> + bdrv_set_dirty_iter(s->dbi, next_offset >> BDRV_SECTOR_BITS);
> + next_dirty = bdrv_dirty_iter_next(s->dbi) * BDRV_SECTOR_SIZE;
> }
> - assert(next_dirty == next_sector);
> + assert(next_dirty == next_offset);
> nb_chunks++;
> }
>
> @@ -394,14 +384,15 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
> * calling bdrv_get_block_status_above could yield - if some blocks are
> * marked dirty in this window, we need to know.
> */
> - bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, sector_num,
> - nb_chunks * sectors_per_chunk);
> + bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset >> BDRV_SECTOR_BITS,
> + nb_chunks * sectors_per_chunk);
> bdrv_dirty_bitmap_unlock(s->dirty_bitmap);
>
> - bitmap_set(s->in_flight_bitmap, sector_num / sectors_per_chunk, nb_chunks);
> - while (nb_chunks > 0 && sector_num < end) {
> + bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
> + while (nb_chunks > 0 && offset < s->bdev_length) {
> int64_t ret;
> int io_sectors;
> + unsigned int io_bytes;
> int64_t io_bytes_acct;
> BlockDriverState *file;
> enum MirrorMethod {
> @@ -410,28 +401,28 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
> MIRROR_METHOD_DISCARD
> } mirror_method = MIRROR_METHOD_COPY;
>
> - assert(!(sector_num % sectors_per_chunk));
> - ret = bdrv_get_block_status_above(source, NULL, sector_num,
> + assert(!(offset % s->granularity));
> + ret = bdrv_get_block_status_above(source, NULL,
> + offset >> BDRV_SECTOR_BITS,
> nb_chunks * sectors_per_chunk,
> &io_sectors, &file);
> + io_bytes = io_sectors * BDRV_SECTOR_SIZE;
> if (ret < 0) {
> - io_sectors = MIN(nb_chunks * sectors_per_chunk,
> - max_io_bytes >> BDRV_SECTOR_BITS);
> + io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
> } else if (ret & BDRV_BLOCK_DATA) {
> - io_sectors = MIN(io_sectors, max_io_bytes >> BDRV_SECTOR_BITS);
> + io_bytes = MIN(io_bytes, max_io_bytes);
> }
>
> - io_sectors -= io_sectors % sectors_per_chunk;
> - if (io_sectors < sectors_per_chunk) {
> - io_sectors = sectors_per_chunk;
> + io_bytes -= io_bytes % s->granularity;
> + if (io_bytes < s->granularity) {
> + io_bytes = s->granularity;
> } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
> - int64_t target_sector_num;
> - int target_nb_sectors;
> - bdrv_round_sectors_to_clusters(blk_bs(s->target), sector_num,
> - io_sectors, &target_sector_num,
> - &target_nb_sectors);
> - if (target_sector_num == sector_num &&
> - target_nb_sectors == io_sectors) {
> + int64_t target_offset;
> + unsigned int target_bytes;
> + bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
> + &target_offset, &target_bytes);
> + if (target_offset == offset &&
> + target_bytes == io_bytes) {
> mirror_method = ret & BDRV_BLOCK_ZERO ?
> MIRROR_METHOD_ZERO :
> MIRROR_METHOD_DISCARD;
> @@ -439,8 +430,7 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
> }
>
> while (s->in_flight >= MAX_IN_FLIGHT) {
> - trace_mirror_yield_in_flight(s, sector_num * BDRV_SECTOR_SIZE,
> - s->in_flight);
> + trace_mirror_yield_in_flight(s, offset, s->in_flight);
> mirror_wait_for_io(s);
> }
>
> @@ -448,30 +438,27 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
> return 0;
> }
>
> - io_sectors = mirror_clip_sectors(s, sector_num, io_sectors);
> + io_bytes = mirror_clip_bytes(s, offset, io_bytes);
> switch (mirror_method) {
> case MIRROR_METHOD_COPY:
> - io_bytes_acct = mirror_do_read(s, sector_num * BDRV_SECTOR_SIZE,
> - io_sectors * BDRV_SECTOR_SIZE);
> - io_sectors = io_bytes_acct / BDRV_SECTOR_SIZE;
> + io_bytes = io_bytes_acct = mirror_do_read(s, offset, io_bytes);
> break;
> case MIRROR_METHOD_ZERO:
> case MIRROR_METHOD_DISCARD:
> - mirror_do_zero_or_discard(s, sector_num * BDRV_SECTOR_SIZE,
> - io_sectors * BDRV_SECTOR_SIZE,
> + mirror_do_zero_or_discard(s, offset, io_bytes,
> mirror_method == MIRROR_METHOD_DISCARD);
> if (write_zeroes_ok) {
> io_bytes_acct = 0;
> } else {
> - io_bytes_acct = io_sectors * BDRV_SECTOR_SIZE;
> + io_bytes_acct = io_bytes;
> }
> break;
> default:
> abort();
> }
> - assert(io_sectors);
> - sector_num += io_sectors;
> - nb_chunks -= DIV_ROUND_UP(io_sectors, sectors_per_chunk);
> + assert(io_bytes);
> + offset += io_bytes;
> + nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
> if (s->common.speed) {
> delay_ns = ratelimit_calculate_delay(&s->limit, io_bytes_acct);
> }
> --
> 2.9.4
>
Thread overview: 58+ messages
2017-06-27 19:24 [Qemu-devel] [PATCH v3 00/20] make bdrv_is_allocated[_above] byte-based Eric Blake
2017-06-27 19:24 ` [Qemu-devel] [PATCH v3 01/20] blockjob: Track job ratelimits via bytes, not sectors Eric Blake
2017-06-30 19:42 ` Jeff Cody
2017-07-04 15:28 ` Kevin Wolf
2017-06-27 19:24 ` [Qemu-devel] [PATCH v3 02/20] trace: Show blockjob actions " Eric Blake
2017-06-30 19:56 ` Jeff Cody
2017-07-04 15:28 ` Kevin Wolf
2017-06-27 19:24 ` [Qemu-devel] [PATCH v3 03/20] stream: Switch stream_populate() to byte-based Eric Blake
2017-06-30 19:57 ` Jeff Cody
2017-07-04 15:28 ` Kevin Wolf
2017-06-27 19:24 ` [Qemu-devel] [PATCH v3 04/20] stream: Switch stream_run() " Eric Blake
2017-06-30 20:01 ` Jeff Cody
2017-07-04 15:00 ` Kevin Wolf
2017-07-05 12:13 ` Eric Blake
2017-07-05 13:41 ` Kevin Wolf
2017-06-27 19:24 ` [Qemu-devel] [PATCH v3 05/20] commit: Switch commit_populate() " Eric Blake
2017-06-30 20:17 ` Jeff Cody
2017-07-04 15:28 ` Kevin Wolf
2017-06-27 19:24 ` [Qemu-devel] [PATCH v3 06/20] commit: Switch commit_run() " Eric Blake
2017-06-30 20:19 ` Jeff Cody
2017-07-04 15:29 ` Kevin Wolf
2017-06-27 19:24 ` [Qemu-devel] [PATCH v3 07/20] mirror: Switch MirrorBlockJob " Eric Blake
2017-06-30 20:20 ` Jeff Cody
2017-07-05 11:42 ` Kevin Wolf
2017-07-05 20:18 ` Eric Blake
2017-06-27 19:24 ` [Qemu-devel] [PATCH v3 08/20] mirror: Switch mirror_do_zero_or_discard() " Eric Blake
2017-06-30 20:22 ` Jeff Cody
2017-07-05 16:36 ` Kevin Wolf
2017-06-27 19:24 ` [Qemu-devel] [PATCH v3 09/20] mirror: Update signature of mirror_clip_sectors() Eric Blake
2017-06-30 20:51 ` Jeff Cody
2017-07-05 16:37 ` Kevin Wolf
2017-06-27 19:24 ` [Qemu-devel] [PATCH v3 10/20] mirror: Switch mirror_cow_align() to byte-based Eric Blake
2017-06-30 21:03 ` Jeff Cody
2017-06-27 19:24 ` [Qemu-devel] [PATCH v3 11/20] mirror: Switch mirror_do_read() " Eric Blake
2017-06-30 21:07 ` Jeff Cody
2017-06-27 19:24 ` [Qemu-devel] [PATCH v3 12/20] mirror: Switch mirror_iteration() " Eric Blake
2017-06-30 21:14 ` Jeff Cody [this message]
2017-06-27 19:24 ` [Qemu-devel] [PATCH v3 13/20] block: Drop unused bdrv_round_sectors_to_clusters() Eric Blake
2017-06-30 21:17 ` [Qemu-devel] [Qemu-block] " Jeff Cody
2017-06-27 19:24 ` [Qemu-devel] [PATCH v3 14/20] backup: Switch BackupBlockJob to byte-based Eric Blake
2017-06-30 21:18 ` Jeff Cody
2017-06-27 19:24 ` [Qemu-devel] [PATCH v3 15/20] backup: Switch block_backup.h " Eric Blake
2017-06-28 0:38 ` Xie Changlong
2017-06-30 21:23 ` Jeff Cody
2017-06-27 19:24 ` [Qemu-devel] [PATCH v3 16/20] backup: Switch backup_do_cow() " Eric Blake
2017-06-30 21:24 ` Jeff Cody
2017-06-27 19:24 ` [Qemu-devel] [PATCH v3 17/20] backup: Switch backup_run() " Eric Blake
2017-06-30 21:25 ` Jeff Cody
2017-06-27 19:24 ` [Qemu-devel] [PATCH v3 18/20] block: Make bdrv_is_allocated() byte-based Eric Blake
2017-06-28 9:14 ` Juan Quintela
2017-06-30 21:32 ` Jeff Cody
2017-07-03 18:22 ` Eric Blake
2017-06-27 19:24 ` [Qemu-devel] [PATCH v3 19/20] block: Minimize raw use of bds->total_sectors Eric Blake
2017-06-28 8:57 ` [Qemu-devel] [Qemu-block] " Manos Pitsidianakis
2017-06-30 21:34 ` Jeff Cody
2017-06-27 19:24 ` [Qemu-devel] [PATCH v3 20/20] block: Make bdrv_is_allocated_above() byte-based Eric Blake
2017-06-28 0:38 ` Xie Changlong
2017-06-30 21:36 ` Jeff Cody