From: Kevin Wolf <kwolf@redhat.com>
To: anthony@codemonkey.ws
Cc: kwolf@redhat.com, qemu-devel@nongnu.org
Subject: [Qemu-devel] [PATCH 08/41] block: add I/O throttling algorithm
Date: Mon,  5 Dec 2011 15:20:45 +0100
Message-ID: <1323094878-7967-9-git-send-email-kwolf@redhat.com>
In-Reply-To: <1323094878-7967-1-git-send-email-kwolf@redhat.com>

From: Zhi Yong Wu <wuzhy@linux.vnet.ibm.com>

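Add a slice-based I/O throttling algorithm to the block layer.

Each BlockDriverState accounts the bytes and I/O operations completed
within the current time slice (initially 5 * BLOCK_IO_SLICE_TIME, i.e.
500 ms). Before a read or write is handed to the driver,
bdrv_io_limits_intercept() checks the request against the configured
bps/iops limits. If dispatching it would exceed a limit, an approximate
wait time is computed, e.g. for the bps case:

    wait_time = (bytes_base + bytes_res) / bps_limit - elapsed_time

The request is then queued on throttled_reqs (re-inserted at the head so
that FIFO order is preserved) and the block timer is armed to restart it
once the wait has passed. bdrv_io_limits_disable() restarts all queued
requests and tears down the timer when throttling is switched off.
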
Signed-off-by: Zhi Yong Wu <wuzhy@linux.vnet.ibm.com>
Signed-off-by: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block.c     |  234 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 block.h     |    1 +
 block_int.h |    1 +
 3 files changed, 236 insertions(+), 0 deletions(-)

diff --git a/block.c b/block.c
index 8cb41c0..42bd308 100644
--- a/block.c
+++ b/block.c
@@ -74,6 +74,13 @@ static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                                bool is_write);
 static void coroutine_fn bdrv_co_do_rw(void *opaque);
 
+static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
+        bool is_write, double elapsed_time, uint64_t *wait);
+static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
+        double elapsed_time, uint64_t *wait);
+static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
+        bool is_write, int64_t *wait);
+
 static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
     QTAILQ_HEAD_INITIALIZER(bdrv_states);
 
@@ -107,6 +114,24 @@ int is_windows_drive(const char *filename)
 #endif
 
 /* throttling disk I/O limits */
+void bdrv_io_limits_disable(BlockDriverState *bs)
+{
+    bs->io_limits_enabled = false;
+
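+    /* Restart every request that is still queued waiting for the limits. */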
+    while (qemu_co_queue_next(&bs->throttled_reqs));
+
+    if (bs->block_timer) {
+        qemu_del_timer(bs->block_timer);
+        qemu_free_timer(bs->block_timer);
+        bs->block_timer = NULL;
+    }
+
+    bs->slice_start = 0;
+    bs->slice_end   = 0;
+    bs->slice_time  = 0;
+    memset(&bs->io_base, 0, sizeof(bs->io_base));
+}
+
 static void bdrv_block_timer(void *opaque)
 {
     BlockDriverState *bs = opaque;
@@ -136,6 +161,31 @@ bool bdrv_io_limits_enabled(BlockDriverState *bs)
          || io_limits->iops[BLOCK_IO_LIMIT_TOTAL];
 }
 
+static void bdrv_io_limits_intercept(BlockDriverState *bs,
+                                     bool is_write, int nb_sectors)
+{
+    int64_t wait_time = -1;
+
+    if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
+        qemu_co_queue_wait(&bs->throttled_reqs);
+    }
+
+    /* Requests are kept in FIFO order: later throttled requests are not
+     * dequeued until the current request has been allowed to be serviced.
+     * So if the current request still exceeds the limits, it is re-inserted
+     * at the head of the queue, and all requests behind it remain queued in
+     * throttled_reqs.
+     */
+
+    while (bdrv_exceed_io_limits(bs, nb_sectors, is_write, &wait_time)) {
+        qemu_mod_timer(bs->block_timer,
+                       wait_time + qemu_get_clock_ns(vm_clock));
+        qemu_co_queue_wait_insert_head(&bs->throttled_reqs);
+    }
+
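+    /* This request is now within the limits: wake the next throttled
+     * request so that it can re-check the limits for itself. */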
+    qemu_co_queue_next(&bs->throttled_reqs);
+}
+
 /* check if the path starts with "<protocol>:" */
 static int path_has_protocol(const char *path)
 {
@@ -718,6 +768,11 @@ int bdrv_open(BlockDriverState *bs, const char *filename, int flags,
         bdrv_dev_change_media_cb(bs, true);
     }
 
+    /* throttling disk I/O limits */
+    if (bs->io_limits_enabled) {
+        bdrv_io_limits_enable(bs);
+    }
+
     return 0;
 
 unlink_and_fail:
@@ -753,6 +808,11 @@ void bdrv_close(BlockDriverState *bs)
 
         bdrv_dev_change_media_cb(bs, false);
     }
+
+    /* throttling disk I/O limits */
+    if (bs->io_limits_enabled) {
+        bdrv_io_limits_disable(bs);
+    }
 }
 
 void bdrv_close_all(void)
@@ -1298,6 +1358,11 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
         return -EIO;
     }
 
+    /* throttling disk read I/O */
+    if (bs->io_limits_enabled) {
+        bdrv_io_limits_intercept(bs, false, nb_sectors);
+    }
+
     return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
 }
 
@@ -1328,6 +1393,11 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
         return -EIO;
     }
 
+    /* throttling disk write I/O */
+    if (bs->io_limits_enabled) {
+        bdrv_io_limits_intercept(bs, true, nb_sectors);
+    }
+
     ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
 
     if (bs->dirty_bitmap) {
@@ -2519,6 +2589,170 @@ void bdrv_aio_cancel(BlockDriverAIOCB *acb)
     acb->pool->cancel(acb);
 }
 
+/* block I/O throttling */
+static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
+                 bool is_write, double elapsed_time, uint64_t *wait)
+{
+    uint64_t bps_limit = 0;
+    double   bytes_limit, bytes_base, bytes_res;
+    double   slice_time, wait_time;
+
+    if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
+        bps_limit = bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
+    } else if (bs->io_limits.bps[is_write]) {
+        bps_limit = bs->io_limits.bps[is_write];
+    } else {
+        if (wait) {
+            *wait = 0;
+        }
+
+        return false;
+    }
+
+    slice_time = bs->slice_end - bs->slice_start;
+    slice_time /= (NANOSECONDS_PER_SECOND);
+    bytes_limit = bps_limit * slice_time;
+    bytes_base  = bs->nr_bytes[is_write] - bs->io_base.bytes[is_write];
+    if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
+        bytes_base += bs->nr_bytes[!is_write] - bs->io_base.bytes[!is_write];
+    }
+
+    /* bytes_base: the number of bytes already read/written in this slice,
+     *             obtained from the accumulated I/O statistics.
+     * bytes_res:  the number of bytes that the current request still needs
+     *             to read/write.
+     * (bytes_base + bytes_res) / bps_limit: the total time needed to
+     *             complete reading/writing all of this data at the limit.
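+     *
+     * Illustrative example (hypothetical numbers, not taken from this
+     * patch): with bps_limit = 1000000 bytes/s, bytes_base = 700000 bytes
+     * already accounted to this slice and bytes_res = 524288 bytes pending,
+     * the request cannot be dispatched before
+     * (700000 + 524288) / 1000000 = about 1.22s of slice time has elapsed.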
+     */
+    bytes_res   = (unsigned) nb_sectors * BDRV_SECTOR_SIZE;
+
+    if (bytes_base + bytes_res <= bytes_limit) {
+        if (wait) {
+            *wait = 0;
+        }
+
+        return false;
+    }
+
+    /* Calc approx time to dispatch */
+    wait_time = (bytes_base + bytes_res) / bps_limit - elapsed_time;
+
+    /* When the I/O rate at runtime exceeds the limits, bs->slice_end needs
+     * to be extended so that the current statistics are kept until the
+     * timer fires. The extension factor below was tuned experimentally.
+     */
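+    /* Note: wait_time is in seconds and BLOCK_IO_SLICE_TIME * 10 equals
+     * NANOSECONDS_PER_SECOND, so the multiplications below convert the wait
+     * into the nanosecond values used for slice_time and *wait. */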
+    bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
+    bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
+    if (wait) {
+        *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
+    }
+
+    return true;
+}
+
+static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
+                             double elapsed_time, uint64_t *wait)
+{
+    uint64_t iops_limit = 0;
+    double   ios_limit, ios_base;
+    double   slice_time, wait_time;
+
+    if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
+        iops_limit = bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
+    } else if (bs->io_limits.iops[is_write]) {
+        iops_limit = bs->io_limits.iops[is_write];
+    } else {
+        if (wait) {
+            *wait = 0;
+        }
+
+        return false;
+    }
+
+    slice_time = bs->slice_end - bs->slice_start;
+    slice_time /= (NANOSECONDS_PER_SECOND);
+    ios_limit  = iops_limit * slice_time;
+    ios_base   = bs->nr_ops[is_write] - bs->io_base.ios[is_write];
+    if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
+        ios_base += bs->nr_ops[!is_write] - bs->io_base.ios[!is_write];
+    }
+
+    if (ios_base + 1 <= ios_limit) {
+        if (wait) {
+            *wait = 0;
+        }
+
+        return false;
+    }
+
+    /* Calc approx time to dispatch */
+    wait_time = (ios_base + 1) / iops_limit;
+    if (wait_time > elapsed_time) {
+        wait_time = wait_time - elapsed_time;
+    } else {
+        wait_time = 0;
+    }
+
+    bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
+    bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
+    if (wait) {
+        *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
+    }
+
+    return true;
+}
+
+static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
+                           bool is_write, int64_t *wait)
+{
+    int64_t  now, max_wait;
+    uint64_t bps_wait = 0, iops_wait = 0;
+    double   elapsed_time;
+    int      bps_ret, iops_ret;
+
+    now = qemu_get_clock_ns(vm_clock);
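+    /* If 'now' still falls inside the current slice, extend the slice;
+     * otherwise start a new slice and record the current I/O totals as the
+     * base values for this slice's accounting. */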
+    if ((bs->slice_start < now)
+        && (bs->slice_end > now)) {
+        bs->slice_end = now + bs->slice_time;
+    } else {
+        bs->slice_time  =  5 * BLOCK_IO_SLICE_TIME;
+        bs->slice_start = now;
+        bs->slice_end   = now + bs->slice_time;
+
+        bs->io_base.bytes[is_write]  = bs->nr_bytes[is_write];
+        bs->io_base.bytes[!is_write] = bs->nr_bytes[!is_write];
+
+        bs->io_base.ios[is_write]    = bs->nr_ops[is_write];
+        bs->io_base.ios[!is_write]   = bs->nr_ops[!is_write];
+    }
+
+    elapsed_time  = now - bs->slice_start;
+    elapsed_time  /= (NANOSECONDS_PER_SECOND);
+
+    bps_ret  = bdrv_exceed_bps_limits(bs, nb_sectors,
+                                      is_write, elapsed_time, &bps_wait);
+    iops_ret = bdrv_exceed_iops_limits(bs, is_write,
+                                      elapsed_time, &iops_wait);
+    if (bps_ret || iops_ret) {
+        max_wait = bps_wait > iops_wait ? bps_wait : iops_wait;
+        if (wait) {
+            *wait = max_wait;
+        }
+
+        now = qemu_get_clock_ns(vm_clock);
+        if (bs->slice_end < now + max_wait) {
+            bs->slice_end = now + max_wait;
+        }
+
+        return true;
+    }
+
+    if (wait) {
+        *wait = 0;
+    }
+
+    return false;
+}
 
 /**************************************************************/
 /* async block device emulation */
diff --git a/block.h b/block.h
index 2d24408..83e17ca 100644
--- a/block.h
+++ b/block.h
@@ -100,6 +100,7 @@ void bdrv_info_stats(Monitor *mon, QObject **ret_data);
 
 /* disk I/O throttling */
 void bdrv_io_limits_enable(BlockDriverState *bs);
+void bdrv_io_limits_disable(BlockDriverState *bs);
 bool bdrv_io_limits_enabled(BlockDriverState *bs);
 
 void bdrv_init(void);
diff --git a/block_int.h b/block_int.h
index 97b1c2b..e2799e4 100644
--- a/block_int.h
+++ b/block_int.h
@@ -39,6 +39,7 @@
 #define BLOCK_IO_LIMIT_TOTAL    2
 
 #define BLOCK_IO_SLICE_TIME     100000000
+#define NANOSECONDS_PER_SECOND  1000000000.0
 
 #define BLOCK_OPT_SIZE          "size"
 #define BLOCK_OPT_ENCRYPT       "encryption"
-- 
1.7.6.4
