From: Vivek Goyal <vgoyal@redhat.com>
To: linux-kernel@vger.kernel.org, linux-fsdevel@vger.kernel.org,
	axboe@kernel.dk
Cc: vgoyal@redhat.com, arighi@develer.com, fengguang.wu@intel.com,
	jack@suse.cz, akpm@linux-foundation.org
Subject: [PATCH 4/8] blk-throttle: Specify number of IOs during dispatch update
Date: Fri,  3 Jun 2011 17:06:45 -0400	[thread overview]
Message-ID: <1307135209-30539-5-git-send-email-vgoyal@redhat.com> (raw)
In-Reply-To: <1307135209-30539-1-git-send-email-vgoyal@redhat.com>

Currently we assume the number of IOs to be 1 while dispatch stats
are being updated. It can happen that we club multiple dispatches
together and update the stats in one shot. The dirty page throttling
logic will count one dirty page as one IO. Hence modify the logic so
that the number of IOs to associate with the update can be specified.
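
As a rough sketch (not part of this patch), a caller that batches the
accounting of several dirtied pages could now report them as multiple
IOs in a single call against the new prototype. The nr_pages value,
the blkg pointer, and the WRITE/sync flags below are stand-ins for
whatever the real caller would pass:

	/*
	 * Hypothetical caller: charge a batch of dirtied pages as
	 * multiple IOs in one dispatch stats update instead of doing
	 * one update per page.
	 */
	unsigned int nr_pages = 4;		/* pages clubbed into this update */
	uint64_t bytes = (uint64_t)nr_pages * PAGE_SIZE;

	/* one dirty page counted as one IO; direction = WRITE, not sync */
	blkiocg_update_dispatch_stats(blkg, bytes, nr_pages, WRITE, false);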

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
---
 block/blk-cgroup.c   |    6 +++---
 block/blk-cgroup.h   |    2 +-
 block/blk-throttle.c |    2 +-
 block/cfq-iosched.c  |    2 +-
 block/cfq.h          |    6 +++---
 5 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index bcaf16e..c0815c5 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -394,8 +394,8 @@ EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
  * should be called under rcu read lock or queue lock to make sure blkg pointer
  * is valid.
  */
-void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
-				uint64_t bytes, bool direction, bool sync)
+void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes,
+			unsigned int nr_ios, bool direction, bool sync)
 {
 	struct blkio_group_stats_cpu *stats_cpu;
 	unsigned long flags;
@@ -412,7 +412,7 @@ void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
 	u64_stats_update_begin(&stats_cpu->syncp);
 	stats_cpu->sectors += bytes >> 9;
 	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
-			1, direction, sync);
+			nr_ios, direction, sync);
 	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
 			bytes, direction, sync);
 	u64_stats_update_end(&stats_cpu->syncp);
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index a71d290..7202dcc 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -318,7 +318,7 @@ void blkiocg_update_timeslice_used(struct blkio_group *blkg,
 					unsigned long time,
 					unsigned long unaccounted_time);
 void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes,
-						bool direction, bool sync);
+				unsigned int nr_ios, bool direction, bool sync);
 void blkiocg_update_completion_stats(struct blkio_group *blkg,
 	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync);
 void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 541830c..81df3ea 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -727,7 +727,7 @@ static void throtl_charge_io(struct throtl_grp *tg, bool rw, unsigned int sz,
 	tg->bytes_disp[rw] += sz;
 	tg->io_disp[rw] += nr_ios;
 
-	blkiocg_update_dispatch_stats(&tg->blkg, sz, rw, sync);
+	blkiocg_update_dispatch_stats(&tg->blkg, sz, nr_ios, rw, sync);
 }
 
 static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 7c52d68..9a1e335 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2064,7 +2064,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
 	cfqq->nr_sectors += blk_rq_sectors(rq);
 	cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
-					rq_data_dir(rq), rq_is_sync(rq));
+					1, rq_data_dir(rq), rq_is_sync(rq));
 }
 
 /*
diff --git a/block/cfq.h b/block/cfq.h
index 2a15592..1795b3d 100644
--- a/block/cfq.h
+++ b/block/cfq.h
@@ -56,9 +56,9 @@ cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
 }
 
 static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
-				uint64_t bytes, bool direction, bool sync)
+		uint64_t bytes, unsigned int nr_ios, bool direction, bool sync)
 {
-	blkiocg_update_dispatch_stats(blkg, bytes, direction, sync);
+	blkiocg_update_dispatch_stats(blkg, bytes, nr_ios, direction, sync);
 }
 
 static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
@@ -101,7 +101,7 @@ static inline void
 cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) {}
 
 static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
-				uint64_t bytes, bool direction, bool sync) {}
+	uint64_t bytes, unsigned int nr_ios, bool direction, bool sync) {}
 static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync) {}
 
 static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
-- 
1.7.4.4

Thread overview:
2011-06-03 21:06 [RFC PATCH 0/8] blk-throttle: Throttle buffered WRITE in balance_dirty_pages() Vivek Goyal
2011-06-03 21:06 ` [PATCH 1/8] blk-throttle: convert wait routines to return jiffies to wait Vivek Goyal
2011-06-03 21:06 ` [PATCH 2/8] blk-throttle: do not enforce first queued bio check in tg_wait_dispatch Vivek Goyal
2011-06-03 21:06 ` [PATCH 3/8] blk-throttle: use IO size and direction as parameters to wait routines Vivek Goyal
2011-06-03 21:06 ` Vivek Goyal [this message]
2011-06-03 21:06 ` [PATCH 5/8] blk-throttle: Get rid of extend slice trace message Vivek Goyal
2011-06-03 21:06 ` [PATCH 6/8] blk-throttle: core logic to throttle task while dirtying pages Vivek Goyal
2011-06-03 21:06 ` [PATCH 7/8] blk-throttle: Do not throttle WRITEs at device level except direct IO Vivek Goyal
2011-06-03 21:06 ` [PATCH 8/8] blk-throttle: enable throttling of task while dirtying pages Vivek Goyal
  -- strict thread matches above, loose matches on Subject: below --
2011-06-28 15:35 [PATCH 0/8][V2] blk-throttle: Throttle buffered WRITEs in balance_dirty_pages() Vivek Goyal
2011-06-28 15:35 ` [PATCH 4/8] blk-throttle: specify number of ios during dispatch update Vivek Goyal
