From: Vivek Goyal <vgoyal@redhat.com>
To: linux-kernel@vger.kernel.org, jaxboe@fusionio.com,
	linux-fsdevel@vger.kernel.org
Cc: andrea@betterlinux.com, vgoyal@redhat.com
Subject: [PATCH 1/8] blk-throttle: convert wait routines to return jiffies to wait
Date: Tue, 28 Jun 2011 11:35:02 -0400
Message-ID: <1309275309-12889-2-git-send-email-vgoyal@redhat.com>
In-Reply-To: <1309275309-12889-1-git-send-email-vgoyal@redhat.com>

Currently the wait routines return the number of jiffies to wait
through a function parameter. A cleaner way is to return it as the
return value: 0 means there is no need to wait, and any value > 0 is
the number of jiffies to wait before the bio can be dispatched.
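
Purely for illustration, here is a minimal standalone sketch of the same
conversion. The foo_within_limit()/foo_wait_limit() helpers are hypothetical
stand-ins, not the blk-throttle code itself; they only show how the
bool-plus-out-parameter style collapses into a single return value where
0 means "no need to wait":

#include <stdbool.h>
#include <stdio.h>

/* Old style: boolean "within limit?" plus an optional out-parameter. */
static bool foo_within_limit(unsigned long disp, unsigned long allowed,
                             unsigned long *wait)
{
        if (disp <= allowed) {
                if (wait)
                        *wait = 0;
                return true;
        }
        if (wait)
                *wait = disp - allowed;         /* stands in for jiffies */
        return false;
}

/* New style: return the wait directly; 0 means no need to wait. */
static unsigned long foo_wait_limit(unsigned long disp, unsigned long allowed)
{
        if (disp <= allowed)
                return 0;
        return disp - allowed;                  /* stands in for jiffies */
}

int main(void)
{
        unsigned long wait = 0;

        /* Old calling convention: two results to keep in sync. */
        if (!foo_within_limit(10, 4, &wait))
                printf("old style: wait %lu jiffies\n", wait);

        /* New calling convention: one value, trivially testable. */
        wait = foo_wait_limit(10, 4);
        if (wait)
                printf("new style: wait %lu jiffies\n", wait);

        return 0;
}

In the patch itself the callers simply test the return value, e.g.
!tg_wait_dispatch(td, tg, bio), instead of passing NULL or a &wait pointer.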

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
---
 block/blk-throttle.c |   72 +++++++++++++++++++++-----------------------------
 1 files changed, 30 insertions(+), 42 deletions(-)

diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index f6a7941..d76717a 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -590,8 +590,8 @@ throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 			tg->slice_start[rw], tg->slice_end[rw], jiffies);
 }
 
-static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
-		struct bio *bio, unsigned long *wait)
+static unsigned long tg_wait_iops_limit(struct throtl_data *td,
+			struct throtl_grp *tg, struct bio *bio)
 {
 	bool rw = bio_data_dir(bio);
 	unsigned int io_allowed;
@@ -621,11 +621,8 @@ static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
 	else
 		io_allowed = tmp;
 
-	if (tg->io_disp[rw] + 1 <= io_allowed) {
-		if (wait)
-			*wait = 0;
-		return 1;
-	}
+	if (tg->io_disp[rw] + 1 <= io_allowed)
+		return 0;
 
 	/* Calc approx time to dispatch */
 	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;
@@ -635,13 +632,15 @@ static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
 	else
 		jiffy_wait = 1;
 
-	if (wait)
-		*wait = jiffy_wait;
-	return 0;
+	return jiffy_wait;
 }
 
-static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
-		struct bio *bio, unsigned long *wait)
+/*
+ * Returns the number of jiffies to wait before IO can be dispatched
+ * according to the bps limit.
+ */
+static unsigned long tg_wait_bps_limit(struct throtl_data *td,
+			struct throtl_grp *tg, struct bio *bio)
 {
 	bool rw = bio_data_dir(bio);
 	u64 bytes_allowed, extra_bytes, tmp;
@@ -659,11 +658,8 @@ static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
 	do_div(tmp, HZ);
 	bytes_allowed = tmp;
 
-	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
-		if (wait)
-			*wait = 0;
-		return 1;
-	}
+	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed)
+		return 0;
 
 	/* Calc approx time to dispatch */
 	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
@@ -677,9 +673,8 @@ static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
 	 * up we did. Add that time also.
 	 */
 	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
-	if (wait)
-		*wait = jiffy_wait;
-	return 0;
+
+	return jiffy_wait;
 }
 
 static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
@@ -691,9 +686,12 @@ static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
 /*
  * Returns whether one can dispatch a bio or not. Also returns approx number
  * of jiffies to wait before this bio is with-in IO rate and can be dispatched
+ *
 + * Returns the number of jiffies one needs to wait before IO can be dispatched.
 + * 0 means the IO can be dispatched now.
  */
-static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
-				struct bio *bio, unsigned long *wait)
+static unsigned long
+tg_wait_dispatch(struct throtl_data *td, struct throtl_grp *tg, struct bio *bio)
 {
 	bool rw = bio_data_dir(bio);
 	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
@@ -707,11 +705,8 @@ static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
 	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));
 
 	/* If tg->bps = -1, then BW is unlimited */
-	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
-		if (wait)
-			*wait = 0;
-		return 1;
-	}
+	if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
+		return 0;
 
 	/*
 	 * If previous slice expired, start a new one otherwise renew/extend
@@ -725,22 +720,15 @@ static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
 			throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
 	}
 
-	if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
-	    && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
-		if (wait)
-			*wait = 0;
-		return 1;
-	}
+	bps_wait = tg_wait_bps_limit(td, tg, bio);
+	iops_wait = tg_wait_iops_limit(td, tg, bio);
 
 	max_wait = max(bps_wait, iops_wait);
 
-	if (wait)
-		*wait = max_wait;
-
 	if (time_before(tg->slice_end[rw], jiffies + max_wait))
 		throtl_extend_slice(td, tg, rw, jiffies + max_wait);
 
-	return 0;
+	return max_wait;
 }
 
 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
@@ -774,10 +762,10 @@ static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
 	struct bio *bio;
 
 	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
-		tg_may_dispatch(td, tg, bio, &read_wait);
+		read_wait = tg_wait_dispatch(td, tg, bio);
 
 	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
-		tg_may_dispatch(td, tg, bio, &write_wait);
+		write_wait = tg_wait_dispatch(td, tg, bio);
 
 	min_wait = min(read_wait, write_wait);
 	disptime = jiffies + min_wait;
@@ -819,7 +807,7 @@ static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
 	/* Try to dispatch 75% READS and 25% WRITES */
 
 	while ((bio = bio_list_peek(&tg->bio_lists[READ]))
-		&& tg_may_dispatch(td, tg, bio, NULL)) {
+		&& !tg_wait_dispatch(td, tg, bio)) {
 
 		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
 		nr_reads++;
@@ -829,7 +817,7 @@ static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
 	}
 
 	while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
-		&& tg_may_dispatch(td, tg, bio, NULL)) {
+		&& !tg_wait_dispatch(td, tg, bio)) {
 
 		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
 		nr_writes++;
@@ -1185,7 +1173,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
 	}
 
 	/* Bio is with-in rate limit of group */
-	if (tg_may_dispatch(td, tg, bio, NULL)) {
+	if (!tg_wait_dispatch(td, tg, bio)) {
 		throtl_charge_bio(tg, bio);
 
 		/*
-- 
1.7.4.4

