Date: Mon, 2 Sep 2013 10:24:45 +0800
From: Fam Zheng
Reply-To: famz@redhat.com
Message-ID: <20130902022445.GA7876@T430s.nay.redhat.com>
References: <1378053587-12121-1-git-send-email-benoit@irqsave.net>
 <1378053587-12121-4-git-send-email-benoit@irqsave.net>
In-Reply-To: <1378053587-12121-4-git-send-email-benoit@irqsave.net>
Subject: Re: [Qemu-devel] [PATCH V11 3/5] block: Enable the new throttling code in the block layer.
To: Benoît Canet
Cc: kwolf@redhat.com, stefanha@gmail.com, qemu-devel@nongnu.org,
	stefanha@redhat.com, pbonzini@redhat.com

On Sun, 09/01 18:39, Benoît Canet wrote:
> Signed-off-by: Benoit Canet
> ---
>  block.c                   | 338 +++++++++++++--------------------------
>  block/qapi.c              |  21 ++-
>  blockdev.c                | 100 ++++++++------
>  include/block/block.h     |   1 -
>  include/block/block_int.h |  32 +----
>  5 files changed, 164 insertions(+), 328 deletions(-)
> 
> diff --git a/block.c b/block.c
> index a387c1a..cf88dc0 100644
> --- a/block.c
> +++ b/block.c
> @@ -86,13 +86,6 @@ static void coroutine_fn bdrv_co_do_rw(void *opaque);
>  static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
>      int64_t sector_num, int nb_sectors);
>  
> -static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
> -        bool is_write, double elapsed_time, uint64_t *wait);
> -static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
> -        double elapsed_time, uint64_t *wait);
> -static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
> -        bool is_write, int64_t *wait);
> -
>  static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
>      QTAILQ_HEAD_INITIALIZER(bdrv_states);
>  
> @@ -123,69 +116,101 @@ int is_windows_drive(const char *filename)
>  #endif
>  
>  /* throttling disk I/O limits */
> -void bdrv_io_limits_disable(BlockDriverState *bs)
> +void bdrv_set_io_limits(BlockDriverState *bs,
> +                        ThrottleConfig *cfg)
>  {
> -    bs->io_limits_enabled = false;
> +    int i;
>  
> -    do {} while (qemu_co_enter_next(&bs->throttled_reqs));
> +    throttle_config(&bs->throttle_state, cfg);
>  
> -    if (bs->block_timer) {
> -        timer_del(bs->block_timer);
> -        timer_free(bs->block_timer);
> -        bs->block_timer = NULL;
> +    for (i = 0; i < 2; i++) {
> +        qemu_co_enter_next(&bs->throttled_reqs[i]);
>      }
> +}
> +
> +/* this function drain all the throttled IOs */
> +static bool bdrv_start_throttled_reqs(BlockDriverState *bs)

This function drains the throttled requests, so why is it named
bdrv_start_throttled_reqs?
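If draining is the intended behaviour, a name that says so might read
better, e.g. something along these lines (just a naming sketch to
illustrate the point, not tested against the rest of the series):

    /* drain the queued throttled requests; return true if any was drained */
    static bool bdrv_drain_throttled_reqs(BlockDriverState *bs);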
> +{
> +    bool drained = false;
> +    bool enabled = bs->io_limits_enabled;
> +    int i;
> +
> +    bs->io_limits_enabled = false;
> +
> +    for (i = 0; i < 2; i++) {
> +        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
> +            drained = true;
> +        }
> +    }
> +
> +    bs->io_limits_enabled = enabled;
>  
> -    bs->slice_start = 0;
> -    bs->slice_end   = 0;
> +    return drained;
>  }
>  
> -static void bdrv_block_timer(void *opaque)
> +void bdrv_io_limits_disable(BlockDriverState *bs)
>  {
> -    BlockDriverState *bs = opaque;
> +    bs->io_limits_enabled = false;
>  
> -    qemu_co_enter_next(&bs->throttled_reqs);
> +    bdrv_start_throttled_reqs(bs);
> +
> +    throttle_destroy(&bs->throttle_state);
>  }
>  
> -void bdrv_io_limits_enable(BlockDriverState *bs)
> +static void bdrv_throttle_read_timer_cb(void *opaque)
>  {
> -    bs->block_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, bdrv_block_timer, bs);
> -    bs->io_limits_enabled = true;
> +    BlockDriverState *bs = opaque;
> +    qemu_co_enter_next(&bs->throttled_reqs[0]);
>  }
>  
> -bool bdrv_io_limits_enabled(BlockDriverState *bs)
> +static void bdrv_throttle_write_timer_cb(void *opaque)
>  {
> -    BlockIOLimit *io_limits = &bs->io_limits;
> -    return io_limits->bps[BLOCK_IO_LIMIT_READ]
> -         || io_limits->bps[BLOCK_IO_LIMIT_WRITE]
> -         || io_limits->bps[BLOCK_IO_LIMIT_TOTAL]
> -         || io_limits->iops[BLOCK_IO_LIMIT_READ]
> -         || io_limits->iops[BLOCK_IO_LIMIT_WRITE]
> -         || io_limits->iops[BLOCK_IO_LIMIT_TOTAL];
> +    BlockDriverState *bs = opaque;
> +    qemu_co_enter_next(&bs->throttled_reqs[1]);
>  }
>  
> +/* should be called before bdrv_set_io_limits if a limit is set */
> +void bdrv_io_limits_enable(BlockDriverState *bs)
> +{
> +    assert(!bs->io_limits_enabled);
> +    throttle_init(&bs->throttle_state,
> +                  QEMU_CLOCK_VIRTUAL,
> +                  bdrv_throttle_read_timer_cb,
> +                  bdrv_throttle_write_timer_cb,
> +                  bs);
> +    bs->io_limits_enabled = true;
> +}
> +
> +/* This function makes an IO wait if needed
> + *
> + * @nb_sectors: the number of sectors of the IO
> + * @is_write:   is the IO a write
> + */
>  static void bdrv_io_limits_intercept(BlockDriverState *bs,
> -                                     bool is_write, int nb_sectors)
> +                                     int nb_sectors,
> +                                     bool is_write)
>  {
> -    int64_t wait_time = -1;
> +    /* does this io must wait */
> +    bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);
>  
> -    if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
> -        qemu_co_queue_wait(&bs->throttled_reqs);
> +    /* if must wait or any request of this type throttled queue the IO */
> +    if (must_wait ||
> +        !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
> +        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
>      }
>  
> -    /* In fact, we hope to keep each request's timing, in FIFO mode. The next
> -     * throttled requests will not be dequeued until the current request is
> -     * allowed to be serviced. So if the current request still exceeds the
> -     * limits, it will be inserted to the head. All requests followed it will
> -     * be still in throttled_reqs queue.
> -     */
> +    /* the IO will be executed, do the accounting */
> +    throttle_account(&bs->throttle_state,
> +                     is_write,
> +                     nb_sectors * BDRV_SECTOR_SIZE);
>  
> -    while (bdrv_exceed_io_limits(bs, nb_sectors, is_write, &wait_time)) {
> -        timer_mod(bs->block_timer,
> -                  wait_time + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
> -        qemu_co_queue_wait_insert_head(&bs->throttled_reqs);
> +    /* if the next request must wait -> do nothing */
> +    if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
> +        return;
>      }
>  
> -    qemu_co_queue_next(&bs->throttled_reqs);
> +    /* else queue next request for execution */
> +    qemu_co_queue_next(&bs->throttled_reqs[is_write]);
>  }
>  
>  /* check if the path starts with ":" */
> @@ -305,7 +330,8 @@ BlockDriverState *bdrv_new(const char *device_name)
>      bdrv_iostatus_disable(bs);
>      notifier_list_init(&bs->close_notifiers);
>      notifier_with_return_list_init(&bs->before_write_notifiers);
> -    qemu_co_queue_init(&bs->throttled_reqs);
> +    qemu_co_queue_init(&bs->throttled_reqs[0]);
> +    qemu_co_queue_init(&bs->throttled_reqs[1]);
>  
>      return bs;
>  }
> @@ -1113,11 +1139,6 @@ int bdrv_open(BlockDriverState *bs, const char *filename, QDict *options,
>          bdrv_dev_change_media_cb(bs, true);
>      }
>  
> -    /* throttling disk I/O limits */
> -    if (bs->io_limits_enabled) {
> -        bdrv_io_limits_enable(bs);
> -    }
> -
>      return 0;
>  
>  unlink_and_fail:
> @@ -1436,7 +1457,10 @@ static bool bdrv_requests_pending(BlockDriverState *bs)
>      if (!QLIST_EMPTY(&bs->tracked_requests)) {
>          return true;
>      }
> -    if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
> +    if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
> +        return true;
> +    }
> +    if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
>          return true;
>      }
>      if (bs->file && bdrv_requests_pending(bs->file)) {
> @@ -1482,7 +1506,7 @@ void bdrv_drain_all(void)
>       * a busy wait.
>       */
>      QTAILQ_FOREACH(bs, &bdrv_states, list) {
> -        while (qemu_co_enter_next(&bs->throttled_reqs)) {
> +        if (bdrv_start_throttled_reqs(bs)) {
>              busy = true;
>          }
>      }
> @@ -1524,13 +1548,12 @@ static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
>  
>      bs_dest->enable_write_cache = bs_src->enable_write_cache;
>  
> -    /* i/o timing parameters */
> -    bs_dest->slice_start        = bs_src->slice_start;
> -    bs_dest->slice_end          = bs_src->slice_end;
> -    bs_dest->slice_submitted    = bs_src->slice_submitted;
> -    bs_dest->io_limits          = bs_src->io_limits;
> -    bs_dest->throttled_reqs     = bs_src->throttled_reqs;
> -    bs_dest->block_timer        = bs_src->block_timer;
> +    /* i/o throttled req */
> +    memcpy(&bs_dest->throttle_state,
> +           &bs_src->throttle_state,
> +           sizeof(ThrottleState));
> +    bs_dest->throttled_reqs[0]  = bs_src->throttled_reqs[0];
> +    bs_dest->throttled_reqs[1]  = bs_src->throttled_reqs[1];
>      bs_dest->io_limits_enabled  = bs_src->io_limits_enabled;
>  
>      /* r/w error */
> @@ -1577,7 +1600,7 @@ void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
>      assert(bs_new->dev == NULL);
>      assert(bs_new->in_use == 0);
>      assert(bs_new->io_limits_enabled == false);
> -    assert(bs_new->block_timer == NULL);
> +    assert(!throttle_have_timer(&bs_new->throttle_state));
>  
>      tmp = *bs_new;
>      *bs_new = *bs_old;
> @@ -1596,7 +1619,7 @@ void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
>      assert(bs_new->job == NULL);
>      assert(bs_new->in_use == 0);
>      assert(bs_new->io_limits_enabled == false);
> -    assert(bs_new->block_timer == NULL);
> +    assert(!throttle_have_timer(&bs_new->throttle_state));
>  
>      bdrv_rebind(bs_new);
>      bdrv_rebind(bs_old);
> @@ -2539,11 +2562,6 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
>          return -EIO;
>      }
>  
> -    /* throttling disk read I/O */
> -    if (bs->io_limits_enabled) {
> -        bdrv_io_limits_intercept(bs, false, nb_sectors);
> -    }
> -
>      if (bs->copy_on_read) {
>          flags |= BDRV_REQ_COPY_ON_READ;
>      }
> @@ -2555,6 +2573,11 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
>          wait_for_overlapping_requests(bs, sector_num, nb_sectors);
>      }
>  
> +    /* throttling disk I/O */
> +    if (bs->io_limits_enabled) {
> +        bdrv_io_limits_intercept(bs, nb_sectors, false);
> +    }
> +
>      tracked_request_begin(&req, bs, sector_num, nb_sectors, false);
>  
>      if (flags & BDRV_REQ_COPY_ON_READ) {
> @@ -2680,15 +2703,15 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
>          return -EIO;
>      }
>  
> -    /* throttling disk write I/O */
> -    if (bs->io_limits_enabled) {
> -        bdrv_io_limits_intercept(bs, true, nb_sectors);
> -    }
> -
>      if (bs->copy_on_read_in_flight) {
>          wait_for_overlapping_requests(bs, sector_num, nb_sectors);
>      }
>  
> +    /* throttling disk I/O */
> +    if (bs->io_limits_enabled) {
> +        bdrv_io_limits_intercept(bs, nb_sectors, true);
> +    }
> +
>      tracked_request_begin(&req, bs, sector_num, nb_sectors, true);
>  
>      ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req);
> @@ -2806,14 +2829,6 @@ void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
>      *nb_sectors_ptr = length;
>  }
>  
> -/* throttling disk io limits */
> -void bdrv_set_io_limits(BlockDriverState *bs,
> -                        BlockIOLimit *io_limits)
> -{
> -    bs->io_limits = *io_limits;
> -    bs->io_limits_enabled = bdrv_io_limits_enabled(bs);
> -}
> -
>  void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
>                         BlockdevOnError on_write_error)
>  {
> @@ -3623,169 +3638,6 @@ void bdrv_aio_cancel(BlockDriverAIOCB *acb)
>      acb->aiocb_info->cancel(acb);
>  }
>  
> -/* block I/O throttling */
> -static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
> -                 bool is_write, double elapsed_time, uint64_t *wait)
> -{
> -    uint64_t bps_limit = 0;
> -    uint64_t extension;
> -    double   bytes_limit, bytes_base, bytes_res;
> -    double   slice_time, wait_time;
> -
> -    if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
> -        bps_limit = bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
> -    } else if (bs->io_limits.bps[is_write]) {
> -        bps_limit = bs->io_limits.bps[is_write];
> -    } else {
> -        if (wait) {
> -            *wait = 0;
> -        }
> -
> -        return false;
> -    }
> -
> -    slice_time = bs->slice_end - bs->slice_start;
> -    slice_time /= (NANOSECONDS_PER_SECOND);
> -    bytes_limit = bps_limit * slice_time;
> -    bytes_base  = bs->slice_submitted.bytes[is_write];
> -    if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
> -        bytes_base += bs->slice_submitted.bytes[!is_write];
> -    }
> -
> -    /* bytes_base: the bytes of data which have been read/written; and
> -     * it is obtained from the history statistic info.
> -     * bytes_res: the remaining bytes of data which need to be read/written.
> -     * (bytes_base + bytes_res) / bps_limit: used to calcuate
> -     *          the total time for completing reading/writting all data.
> -     */
> -    bytes_res   = (unsigned) nb_sectors * BDRV_SECTOR_SIZE;
> -
> -    if (bytes_base + bytes_res <= bytes_limit) {
> -        if (wait) {
> -            *wait = 0;
> -        }
> -
> -        return false;
> -    }
> -
> -    /* Calc approx time to dispatch */
> -    wait_time = (bytes_base + bytes_res) / bps_limit - elapsed_time;
> -
> -    /* When the I/O rate at runtime exceeds the limits,
> -     * bs->slice_end need to be extended in order that the current statistic
> -     * info can be kept until the timer fire, so it is increased and tuned
> -     * based on the result of experiment.
> -     */
> -    extension = wait_time * NANOSECONDS_PER_SECOND;
> -    extension = DIV_ROUND_UP(extension, BLOCK_IO_SLICE_TIME) *
> -                BLOCK_IO_SLICE_TIME;
> -    bs->slice_end += extension;
> -    if (wait) {
> -        *wait = wait_time * NANOSECONDS_PER_SECOND;
> -    }
> -
> -    return true;
> -}
> -
> -static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
> -                             double elapsed_time, uint64_t *wait)
> -{
> -    uint64_t iops_limit = 0;
> -    double   ios_limit, ios_base;
> -    double   slice_time, wait_time;
> -
> -    if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
> -        iops_limit = bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
> -    } else if (bs->io_limits.iops[is_write]) {
> -        iops_limit = bs->io_limits.iops[is_write];
> -    } else {
> -        if (wait) {
> -            *wait = 0;
> -        }
> -
> -        return false;
> -    }
> -
> -    slice_time = bs->slice_end - bs->slice_start;
> -    slice_time /= (NANOSECONDS_PER_SECOND);
> -    ios_limit  = iops_limit * slice_time;
> -    ios_base   = bs->slice_submitted.ios[is_write];
> -    if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
> -        ios_base += bs->slice_submitted.ios[!is_write];
> -    }
> -
> -    if (ios_base + 1 <= ios_limit) {
> -        if (wait) {
> -            *wait = 0;
> -        }
> -
> -        return false;
> -    }
> -
> -    /* Calc approx time to dispatch, in seconds */
> -    wait_time = (ios_base + 1) / iops_limit;
> -    if (wait_time > elapsed_time) {
> -        wait_time = wait_time - elapsed_time;
> -    } else {
> -        wait_time = 0;
> -    }
> -
> -    /* Exceeded current slice, extend it by another slice time */
> -    bs->slice_end += BLOCK_IO_SLICE_TIME;
> -    if (wait) {
> -        *wait = wait_time * NANOSECONDS_PER_SECOND;
> -    }
> -
> -    return true;
> -}
> -
> -static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
> -                           bool is_write, int64_t *wait)
> -{
> -    int64_t  now, max_wait;
> -    uint64_t bps_wait = 0, iops_wait = 0;
> -    double   elapsed_time;
> -    int      bps_ret, iops_ret;
> -
> -    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
> -    if (now > bs->slice_end) {
> -        bs->slice_start = now;
> -        bs->slice_end   = now + BLOCK_IO_SLICE_TIME;
> -        memset(&bs->slice_submitted, 0, sizeof(bs->slice_submitted));
> -    }
> -
> -    elapsed_time  = now - bs->slice_start;
> -    elapsed_time  /= (NANOSECONDS_PER_SECOND);
> -
> -    bps_ret  = bdrv_exceed_bps_limits(bs, nb_sectors,
> -                                      is_write, elapsed_time, &bps_wait);
> -    iops_ret = bdrv_exceed_iops_limits(bs, is_write,
> -                                       elapsed_time, &iops_wait);
> -    if (bps_ret || iops_ret) {
> -        max_wait = bps_wait > iops_wait ? bps_wait : iops_wait;
> -        if (wait) {
> -            *wait = max_wait;
> -        }
> -
> -        now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
> -        if (bs->slice_end < now + max_wait) {
> -            bs->slice_end = now + max_wait;
> -        }
> -
> -        return true;
> -    }
> -
> -    if (wait) {
> -        *wait = 0;
> -    }
> -
> -    bs->slice_submitted.bytes[is_write] += (int64_t)nb_sectors *
> -                                           BDRV_SECTOR_SIZE;
> -    bs->slice_submitted.ios[is_write]++;
> -
> -    return false;
> -}
> -
>  /**************************************************************/
>  /* async block device emulation */
>  
> diff --git a/block/qapi.c b/block/qapi.c
> index a4bc411..cac3919 100644
> --- a/block/qapi.c
> +++ b/block/qapi.c
> @@ -223,18 +223,15 @@ void bdrv_query_info(BlockDriverState *bs,
>      info->inserted->backing_file_depth = bdrv_get_backing_file_depth(bs);
>  
>      if (bs->io_limits_enabled) {
> -        info->inserted->bps =
> -                       bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
> -        info->inserted->bps_rd =
> -                       bs->io_limits.bps[BLOCK_IO_LIMIT_READ];
> -        info->inserted->bps_wr =
> -                       bs->io_limits.bps[BLOCK_IO_LIMIT_WRITE];
> -        info->inserted->iops =
> -                       bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
> -        info->inserted->iops_rd =
> -                       bs->io_limits.iops[BLOCK_IO_LIMIT_READ];
> -        info->inserted->iops_wr =
> -                       bs->io_limits.iops[BLOCK_IO_LIMIT_WRITE];
> +        ThrottleConfig cfg;
> +        throttle_get_config(&bs->throttle_state, &cfg);
> +        info->inserted->bps     = cfg.buckets[THROTTLE_BPS_TOTAL].avg;
> +        info->inserted->bps_rd  = cfg.buckets[THROTTLE_BPS_READ].avg;
> +        info->inserted->bps_wr  = cfg.buckets[THROTTLE_BPS_WRITE].avg;
> +
> +        info->inserted->iops    = cfg.buckets[THROTTLE_OPS_TOTAL].avg;
> +        info->inserted->iops_rd = cfg.buckets[THROTTLE_OPS_READ].avg;
> +        info->inserted->iops_wr = cfg.buckets[THROTTLE_OPS_WRITE].avg;
>      }
>  
>      bs0 = bs;
> diff --git a/blockdev.c b/blockdev.c
> index 121520e..66fce9f 100644
> --- a/blockdev.c
> +++ b/blockdev.c
> @@ -281,32 +281,16 @@ static int parse_block_error_action(const char *buf, bool is_read)
>      }
>  }
>  
> -static bool do_check_io_limits(BlockIOLimit *io_limits, Error **errp)
> +static bool check_throttle_config(ThrottleConfig *cfg, Error **errp)
>  {
> -    bool bps_flag;
> -    bool iops_flag;
> -
> -    assert(io_limits);
> -
> -    bps_flag  = (io_limits->bps[BLOCK_IO_LIMIT_TOTAL] != 0)
> -                 && ((io_limits->bps[BLOCK_IO_LIMIT_READ] != 0)
> -                 || (io_limits->bps[BLOCK_IO_LIMIT_WRITE] != 0));
> -    iops_flag = (io_limits->iops[BLOCK_IO_LIMIT_TOTAL] != 0)
> -                 && ((io_limits->iops[BLOCK_IO_LIMIT_READ] != 0)
> -                 || (io_limits->iops[BLOCK_IO_LIMIT_WRITE] != 0));
> -    if (bps_flag || iops_flag) {
> -        error_setg(errp, "bps(iops) and bps_rd/bps_wr(iops_rd/iops_wr) "
> +    if (throttle_conflicting(cfg)) {
> +        error_setg(errp, "bps/iops/max total values and read/write values"
>                     "cannot be used at the same time");

The two literals concatenate to:

    "bps/iops/max total values and read/write valuescannot be used at the same time"

There is a missing space between "values" and "cannot".

>          return false;
>      }
>  
> -    if (io_limits->bps[BLOCK_IO_LIMIT_TOTAL] < 0 ||
> -        io_limits->bps[BLOCK_IO_LIMIT_WRITE] < 0 ||
> -        io_limits->bps[BLOCK_IO_LIMIT_READ] < 0 ||
> -        io_limits->iops[BLOCK_IO_LIMIT_TOTAL] < 0 ||
> -        io_limits->iops[BLOCK_IO_LIMIT_WRITE] < 0 ||
> -        io_limits->iops[BLOCK_IO_LIMIT_READ] < 0) {
> -        error_setg(errp, "bps and iops values must be 0 or greater");
> +    if (!throttle_is_valid(cfg)) {
> +        error_setg(errp, "bps/iops/maxs values must be 0 or greater");
>          return false;
>      }
>  
> @@ -331,7 +315,7 @@ static DriveInfo *blockdev_init(QemuOpts *all_opts,
>      int on_read_error, on_write_error;
>      const char *devaddr;
>      DriveInfo *dinfo;
> -    BlockIOLimit io_limits;
> +    ThrottleConfig cfg;
>      int snapshot = 0;
>      bool copy_on_read;
>      int ret;
> @@ -497,20 +481,31 @@ static DriveInfo *blockdev_init(QemuOpts *all_opts,
>      }
>  
>      /* disk I/O throttling */
> -    io_limits.bps[BLOCK_IO_LIMIT_TOTAL]  =
> +    memset(&cfg, 0, sizeof(cfg));
> +    cfg.buckets[THROTTLE_BPS_TOTAL].avg =
>          qemu_opt_get_number(opts, "throttling.bps-total", 0);
> -    io_limits.bps[BLOCK_IO_LIMIT_READ]   =
> +    cfg.buckets[THROTTLE_BPS_READ].avg  =
>          qemu_opt_get_number(opts, "throttling.bps-read", 0);
> -    io_limits.bps[BLOCK_IO_LIMIT_WRITE]  =
> +    cfg.buckets[THROTTLE_BPS_WRITE].avg =
>          qemu_opt_get_number(opts, "throttling.bps-write", 0);
> -    io_limits.iops[BLOCK_IO_LIMIT_TOTAL] =
> +    cfg.buckets[THROTTLE_OPS_TOTAL].avg =
>          qemu_opt_get_number(opts, "throttling.iops-total", 0);
> -    io_limits.iops[BLOCK_IO_LIMIT_READ]  =
> +    cfg.buckets[THROTTLE_OPS_READ].avg  =
>          qemu_opt_get_number(opts, "throttling.iops-read", 0);
> -    io_limits.iops[BLOCK_IO_LIMIT_WRITE] =
> +    cfg.buckets[THROTTLE_OPS_WRITE].avg =
>          qemu_opt_get_number(opts, "throttling.iops-write", 0);
>  
> -    if (!do_check_io_limits(&io_limits, &error)) {
> +    cfg.buckets[THROTTLE_BPS_TOTAL].max = 0;
> +    cfg.buckets[THROTTLE_BPS_READ].max  = 0;
> +    cfg.buckets[THROTTLE_BPS_WRITE].max = 0;
> +
> +    cfg.buckets[THROTTLE_OPS_TOTAL].max = 0;
> +    cfg.buckets[THROTTLE_OPS_READ].max  = 0;
> +    cfg.buckets[THROTTLE_OPS_WRITE].max = 0;
> +
> +    cfg.op_size = 0;
> +
> +    if (!check_throttle_config(&cfg, &error)) {
>          error_report("%s", error_get_pretty(error));
>          error_free(error);
>          return NULL;
> @@ -637,7 +632,10 @@ static DriveInfo *blockdev_init(QemuOpts *all_opts,
>      bdrv_set_on_error(dinfo->bdrv, on_read_error, on_write_error);
>  
>      /* disk I/O throttling */
> -    bdrv_set_io_limits(dinfo->bdrv, &io_limits);
> +    if (throttle_enabled(&cfg)) {
> +        bdrv_io_limits_enable(dinfo->bdrv);
> +        bdrv_set_io_limits(dinfo->bdrv, &cfg);
> +    }
>  
>      switch(type) {
>      case IF_IDE:
> @@ -1271,7 +1269,7 @@ void qmp_block_set_io_throttle(const char *device, int64_t bps, int64_t bps_rd,
>                                 int64_t bps_wr, int64_t iops, int64_t iops_rd,
>                                 int64_t iops_wr, Error **errp)
>  {
> -    BlockIOLimit io_limits;
> +    ThrottleConfig cfg;
>      BlockDriverState *bs;
>  
>      bs = bdrv_find(device);
> @@ -1280,27 +1278,37 @@ void qmp_block_set_io_throttle(const char *device, int64_t bps, int64_t bps_rd,
>          return;
>      }
>  
> -    io_limits.bps[BLOCK_IO_LIMIT_TOTAL] = bps;
> -    io_limits.bps[BLOCK_IO_LIMIT_READ]  = bps_rd;
> -    io_limits.bps[BLOCK_IO_LIMIT_WRITE] = bps_wr;
> -    io_limits.iops[BLOCK_IO_LIMIT_TOTAL]= iops;
> -    io_limits.iops[BLOCK_IO_LIMIT_READ] = iops_rd;
> -    io_limits.iops[BLOCK_IO_LIMIT_WRITE]= iops_wr;
> +    memset(&cfg, 0, sizeof(cfg));
> +    cfg.buckets[THROTTLE_BPS_TOTAL].avg = bps;
> +    cfg.buckets[THROTTLE_BPS_READ].avg  = bps_rd;
> +    cfg.buckets[THROTTLE_BPS_WRITE].avg = bps_wr;
> +
> +    cfg.buckets[THROTTLE_OPS_TOTAL].avg = iops;
> +    cfg.buckets[THROTTLE_OPS_READ].avg  = iops_rd;
> +    cfg.buckets[THROTTLE_OPS_WRITE].avg = iops_wr;
> +
> +    cfg.buckets[THROTTLE_BPS_TOTAL].max = 0;
> +    cfg.buckets[THROTTLE_BPS_READ].max  = 0;
> +    cfg.buckets[THROTTLE_BPS_WRITE].max = 0;
> +
> +    cfg.buckets[THROTTLE_OPS_TOTAL].max = 0;
> +    cfg.buckets[THROTTLE_OPS_READ].max  = 0;
> +    cfg.buckets[THROTTLE_OPS_WRITE].max = 0;
>  
> -    if (!do_check_io_limits(&io_limits, errp)) {
> +    cfg.op_size = 0;
> +
> +    if (!check_throttle_config(&cfg, errp)) {
>          return;
>      }
>  
> -    bs->io_limits = io_limits;
> -
> -    if (!bs->io_limits_enabled && bdrv_io_limits_enabled(bs)) {
> +    if (!bs->io_limits_enabled && throttle_enabled(&cfg)) {
>          bdrv_io_limits_enable(bs);
> -    } else if (bs->io_limits_enabled && !bdrv_io_limits_enabled(bs)) {
> +    } else if (bs->io_limits_enabled && !throttle_enabled(&cfg)) {
>          bdrv_io_limits_disable(bs);
> -    } else {
> -        if (bs->block_timer) {
> -            timer_mod(bs->block_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
> -        }
> +    }
> +
> +    if (bs->io_limits_enabled) {
> +        bdrv_set_io_limits(bs, &cfg);
>      }
>  }
>  
> diff --git a/include/block/block.h b/include/block/block.h
> index 742fce5..b16d579 100644
> --- a/include/block/block.h
> +++ b/include/block/block.h
> @@ -107,7 +107,6 @@ void bdrv_info_stats(Monitor *mon, QObject **ret_data);
>  /* disk I/O throttling */
>  void bdrv_io_limits_enable(BlockDriverState *bs);
>  void bdrv_io_limits_disable(BlockDriverState *bs);
> -bool bdrv_io_limits_enabled(BlockDriverState *bs);
>  
>  void bdrv_init(void);
>  void bdrv_init_with_whitelist(void);
> diff --git a/include/block/block_int.h b/include/block/block_int.h
> index 8012e25..c3c9c61 100644
> --- a/include/block/block_int.h
> +++ b/include/block/block_int.h
> @@ -35,18 +35,12 @@
>  #include "qemu/hbitmap.h"
>  #include "block/snapshot.h"
>  #include "qemu/main-loop.h"
> +#include "qemu/throttle.h"
>  
>  #define BLOCK_FLAG_ENCRYPT          1
>  #define BLOCK_FLAG_COMPAT6          4
>  #define BLOCK_FLAG_LAZY_REFCOUNTS   8
>  
> -#define BLOCK_IO_LIMIT_READ     0
> -#define BLOCK_IO_LIMIT_WRITE    1
> -#define BLOCK_IO_LIMIT_TOTAL    2
> -
> -#define BLOCK_IO_SLICE_TIME     100000000
> -#define NANOSECONDS_PER_SECOND  1000000000.0
> -
>  #define BLOCK_OPT_SIZE              "size"
>  #define BLOCK_OPT_ENCRYPT           "encryption"
>  #define BLOCK_OPT_COMPAT6           "compat6"
> @@ -70,17 +64,6 @@ typedef struct BdrvTrackedRequest {
>      CoQueue wait_queue; /* coroutines blocked on this request */
>  } BdrvTrackedRequest;
>  
> -
> -typedef struct BlockIOLimit {
> -    int64_t bps[3];
> -    int64_t iops[3];
> -} BlockIOLimit;
> -
> -typedef struct BlockIOBaseValue {
> -    uint64_t bytes[2];
> -    uint64_t ios[2];
> -} BlockIOBaseValue;
> -
>  struct BlockDriver {
>      const char *format_name;
>      int instance_size;
> @@ -264,13 +247,9 @@ struct BlockDriverState {
>      /* number of in-flight copy-on-read requests */
>      unsigned int copy_on_read_in_flight;
>  
> -    /* the time for latest disk I/O */
> -    int64_t slice_start;
> -    int64_t slice_end;
> -    BlockIOLimit io_limits;
> -    BlockIOBaseValue slice_submitted;
> -    CoQueue throttled_reqs;
> -    QEMUTimer *block_timer;
> +    /* I/O throttling */
> +    ThrottleState throttle_state;
> +    CoQueue throttled_reqs[2];
>      bool io_limits_enabled;
>  
>      /* I/O stats (display with "info blockstats"). */
> @@ -312,7 +291,8 @@ struct BlockDriverState {
>  int get_tmp_filename(char *filename, int size);
>  
>  void bdrv_set_io_limits(BlockDriverState *bs,
> -                        BlockIOLimit *io_limits);
> +                        ThrottleConfig *cfg);
> +
>  
>  /**
>   * bdrv_add_before_write_notifier:
> -- 
> 1.7.10.4
> 
> 
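On the message concatenation noted above in check_throttle_config():
presumably the fix is just a leading space in the second literal,
something like (untested sketch):

    error_setg(errp, "bps/iops/max total values and read/write values"
               " cannot be used at the same time");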