From: Vivek Goyal <vgoyal@redhat.com>
To: nauman@google.com, dpshah@google.com, lizf@cn.fujitsu.com,
mikew@google.com, fchecconi@gmail.com, paolo.valente@unimore.it,
jens.axboe@oracle.com, ryov@valinux.co.jp,
fernando@oss.ntt.co.jp, s-uchida@ap.jp.nec.com,
taka@valinux.co.jp, guijianfeng@cn.fujitsu.com,
jmoyer@redhat.com, dhaval@linux.vnet.ibm.com,
balbir@linux.vnet.ibm.com, linux-kernel@vger.kernel.org,
containers@lists.linux-foundation.org, righi.andrea@gmail.com,
agk@redhat.com, dm-devel@redhat.com, snitzer@redhat.com,
m-ikeda@ds.jp.nec.com
Cc: vgoyal@redhat.com, akpm@linux-foundation.org
Subject: [PATCH 03/18] io-controller: Charge for time slice based on average disk rate
Date: Tue, 5 May 2009 15:58:30 -0400 [thread overview]
Message-ID: <1241553525-28095-4-git-send-email-vgoyal@redhat.com> (raw)
In-Reply-To: <1241553525-28095-1-git-send-email-vgoyal@redhat.com>
o There are situations where a queue gets expired very soon and it looks
  as if the time slice used by that queue is zero. For example, an async
  queue may dispatch a bunch of requests and be expired before the first
  request completes. Another example is a queue that is expired as soon
  as the first request completes and has no more requests queued (sync
  queues on SSDs).

o Currently we just charge 25% of the slice length in such cases. This
  patch tries to improve on that approximation by keeping track of the
  average disk rate and charging for time as nr_sectors/disk_rate. A
  standalone sketch of this arithmetic follows below.

o This is still experimental; I am not yet sure whether it gives a
  measurable improvement.
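Roughly, the rate tracking keeps a decaying average of (sectors
completed, jiffies elapsed) per sampling window and divides the two.
Below is a minimal user-space sketch of that fixed-point arithmetic;
the main() harness and the sample numbers are made up for illustration,
only the update formulas mirror the patch:

	#include <stdio.h>

	static unsigned long rate_sectors;	/* avg sectors/window, scaled by 256 */
	static unsigned long rate_time;		/* avg window length, scaled by 256 */
	static unsigned long mean_rate;		/* sectors per jiffy */

	static void update_rate(unsigned long sectors, long elapsed)
	{
		if (!elapsed)
			elapsed = 1;
		/* 7/8 decay; samples scaled by 256 to keep precision */
		rate_sectors = (7 * rate_sectors + 256 * sectors) / 8;
		rate_time = (7 * rate_time + 256 * elapsed) / 8;
		/* the 256 factors cancel; adding rate_time/2 rounds to nearest */
		mean_rate = (rate_sectors + rate_time / 2) / rate_time;
	}

	int main(void)
	{
		/* three sampling windows: sectors completed, jiffies elapsed */
		update_rate(1024, 10);
		update_rate(2048, 10);
		update_rate(512, 5);
		printf("mean_rate=%lu sectors/jiffy\n", mean_rate);

		/* charge a queue that dispatched 300 sectors, minimum 1 jiffy
		 * (the patch instead falls back to budget/4 if mean_rate is 0) */
		unsigned long charge = 300 / mean_rate;
		if (!charge)
			charge = 1;
		printf("charge=%lu jiffies\n", charge);
		return 0;
	}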
Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
---
block/elevator-fq.c | 85 +++++++++++++++++++++++++++++++++++++++++++++++++-
block/elevator-fq.h | 11 ++++++
2 files changed, 94 insertions(+), 2 deletions(-)
diff --git a/block/elevator-fq.c b/block/elevator-fq.c
index 9aea899..9f1fbb9 100644
--- a/block/elevator-fq.c
+++ b/block/elevator-fq.c
@@ -19,6 +19,9 @@ const int elv_slice_async_rq = 2;
int elv_slice_idle = HZ / 125;
static struct kmem_cache *elv_ioq_pool;
+/* Maximum window length for updating the average disk rate */
+static int elv_rate_sampling_window = HZ / 10;
+
#define ELV_SLICE_SCALE (5)
#define ELV_HW_QUEUE_MIN (5)
#define IO_SERVICE_TREE_INIT ((struct io_service_tree) \
@@ -1022,6 +1025,47 @@ static void elv_ioq_update_io_thinktime(struct io_queue *ioq)
ioq->ttime_mean = (ioq->ttime_total + 128) / ioq->ttime_samples;
}
+static void elv_update_io_rate(struct elv_fq_data *efqd, struct request *rq)
+{
+ long elapsed = jiffies - efqd->rate_sampling_start;
+ unsigned long total;
+
+ /* sampling window is off */
+ if (!efqd->rate_sampling_start)
+ return;
+
+ efqd->rate_sectors_current += rq->nr_sectors;
+
+ if (efqd->rq_in_driver && (elapsed < elv_rate_sampling_window))
+ return;
+
+ efqd->rate_sectors = (7*efqd->rate_sectors +
+ 256*efqd->rate_sectors_current) / 8;
+
+ if (!elapsed) {
+ /*
+ * Updating the rate before even one jiffy has elapsed. Could
+ * be a problem with fast queuing/non-queuing hardware. Should
+ * we look at a higher-resolution time source?
+ *
+ * With non-queuing hardware we will probably not dispatch
+ * from multiple queues at once, so we can account for disk
+ * time used directly and will not need this approximation
+ * anyway.
+ */
+ elapsed = 1;
+ }
+
+ efqd->rate_time = (7*efqd->rate_time + 256*elapsed) / 8;
+ total = efqd->rate_sectors + (efqd->rate_time/2);
+ efqd->mean_rate = total/efqd->rate_time;
+
+ elv_log(efqd, "mean_rate=%lu, t=%ld s=%lu", efqd->mean_rate,
+ elapsed, efqd->rate_sectors_current);
+ efqd->rate_sampling_start = 0;
+ efqd->rate_sectors_current = 0;
+}
+
/*
* Disable idle window if the process thinks too long.
* This idle flag can also be updated by io scheduler.
@@ -1312,6 +1356,34 @@ void elv_del_ioq_busy(struct elevator_queue *e, struct io_queue *ioq,
}
/*
+ * Calculate the effective disk time used by the queue, based on how
+ * many sectors the queue has dispatched and the average disk rate.
+ * Returns disk time in jiffies.
+ */
+static inline unsigned long elv_disk_time_used(struct request_queue *q,
+ struct io_queue *ioq)
+{
+ struct elv_fq_data *efqd = &q->elevator->efqd;
+ struct io_entity *entity = &ioq->entity;
+ unsigned long jiffies_used = 0;
+
+ if (!efqd->mean_rate)
+ return entity->budget/4;
+
+ /* Charge the queue based on average disk rate */
+ jiffies_used = ioq->nr_sectors/efqd->mean_rate;
+
+ if (!jiffies_used)
+ jiffies_used = 1;
+
+ elv_log_ioq(efqd, ioq, "disk time=%ums sect=%d rate=%lu",
+ jiffies_to_msecs(jiffies_used),
+ ioq->nr_sectors, efqd->mean_rate);
+
+ return jiffies_used;
+}
+
+/*
* Do the accounting. Determine how much service (in terms of time slices)
* current queue used and adjust the start, finish time of queue and vtime
* of the tree accordingly.
@@ -1363,7 +1435,7 @@ void __elv_ioq_slice_expired(struct request_queue *q, struct io_queue *ioq)
* the requests to finish. But this will reduce throughput.
*/
if (!ioq->slice_end)
- slice_used = entity->budget/4;
+ slice_used = elv_disk_time_used(q, ioq);
else {
if (time_after(ioq->slice_end, jiffies)) {
slice_unused = ioq->slice_end - jiffies;
@@ -1373,7 +1445,7 @@ void __elv_ioq_slice_expired(struct request_queue *q, struct io_queue *ioq)
* completing first request. Charge based on
* the average disk rate instead.
*/
- slice_used = entity->budget/4;
+ slice_used = elv_disk_time_used(q, ioq);
} else
slice_used = entity->budget - slice_unused;
} else {
@@ -1391,6 +1463,8 @@ void __elv_ioq_slice_expired(struct request_queue *q, struct io_queue *ioq)
BUG_ON(ioq != efqd->active_queue);
elv_reset_active_ioq(efqd);
+ /* Queue is being expired. Reset number of sectors dispatched */
+ ioq->nr_sectors = 0;
if (!ioq->nr_queued)
elv_del_ioq_busy(q->elevator, ioq, 1);
else
@@ -1725,6 +1799,7 @@ void elv_fq_dispatched_request(struct elevator_queue *e, struct request *rq)
BUG_ON(!ioq);
elv_ioq_request_dispatched(ioq);
+ ioq->nr_sectors += rq->nr_sectors;
elv_ioq_request_removed(e, rq);
elv_clear_ioq_must_dispatch(ioq);
}
@@ -1737,6 +1812,10 @@ void elv_fq_activate_rq(struct request_queue *q, struct request *rq)
return;
efqd->rq_in_driver++;
+
+ if (!efqd->rate_sampling_start)
+ efqd->rate_sampling_start = jiffies;
+
elv_log_ioq(efqd, rq_ioq(rq), "activate rq, drv=%d",
efqd->rq_in_driver);
}
@@ -1826,6 +1905,8 @@ void elv_ioq_completed_request(struct request_queue *q, struct request *rq)
efqd->rq_in_driver--;
ioq->dispatched--;
+ elv_update_io_rate(efqd, rq);
+
if (sync)
ioq->last_end_request = jiffies;
diff --git a/block/elevator-fq.h b/block/elevator-fq.h
index 3bea279..ce2d671 100644
--- a/block/elevator-fq.h
+++ b/block/elevator-fq.h
@@ -165,6 +165,9 @@ struct io_queue {
/* Requests dispatched from this queue */
int dispatched;
+ /* Number of sectors dispatched in current dispatch round */
+ int nr_sectors;
+
/* Keep a track of think time of processes in this queue */
unsigned long last_end_request;
unsigned long ttime_total;
@@ -223,6 +226,14 @@ struct elv_fq_data {
struct work_struct unplug_work;
unsigned int elv_slice[2];
+
+ /* Fields for keeping track of average disk rate */
+ unsigned long rate_sectors; /* avg sectors per window, scaled by 256 */
+ unsigned long rate_time; /* avg window length in jiffies, scaled by 256 */
+ unsigned long mean_rate; /* sectors per jiffy */
+ unsigned long long rate_sampling_start; /* sampling window start, in jiffies */
+ /* number of sectors that finished IO during the current sampling window */
+ unsigned long rate_sectors_current;
};
extern int elv_slice_idle;
--
1.6.0.1