From: Dan Williams <dan.j.williams@intel.com>
To: axboe@kernel.dk, linux-nvdimm@lists.01.org
Cc: boaz@plexistor.com, toshi.kani@hp.com,
Vishal Verma <vishal.l.verma@linux.intel.com>,
linux-kernel@vger.kernel.org, hch@lst.de,
linux-acpi@vger.kernel.org, linux-fsdevel@vger.kernel.org,
mingo@kernel.org
Subject: [PATCH 12/15] libnvdimm: enable iostat
Date: Wed, 17 Jun 2015 19:55:51 -0400
Message-ID: <20150617235551.12943.76656.stgit@dwillia2-desk3.amr.corp.intel.com>
In-Reply-To: <20150617235209.12943.24419.stgit@dwillia2-desk3.amr.corp.intel.com>

Block I/O statistics are disabled by default for libnvdimm block devices
since the accounting overhead is prohibitive relative to persistent
memory latencies, but if the user explicitly turns statistics on we'll
oblige.
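
For reference, the accounting pattern applied to each of the pmem, blk,
and btt make_request functions below boils down to the following sketch.
The helpers are the ones this patch adds in nd.h and core.c;
example_make_request and the elided per-driver transfer are placeholders
for illustration only, not code from the tree:

	#include <linux/blkdev.h>
	#include <linux/bio.h>
	#include "nd.h"

	static void example_make_request(struct request_queue *q, struct bio *bio)
	{
		unsigned long start;
		struct bvec_iter iter;
		struct bio_vec bvec;
		bool do_acct;
		int err = 0;

		/* returns false (and skips accounting) unless iostat is enabled */
		do_acct = nd_iostat_start(bio, &start);
		bio_for_each_segment(bvec, bio, iter) {
			/* driver-specific transfer of bvec elided */
		}
		if (do_acct)
			nd_iostat_end(bio, start);

		bio_endio(bio, err);
	}
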
Reviewed-by: Vishal Verma <vishal.l.verma@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
drivers/nvdimm/Kconfig | 14 ++++++++++++++
drivers/nvdimm/blk.c | 7 ++++++-
drivers/nvdimm/btt.c | 7 ++++++-
drivers/nvdimm/core.c | 31 +++++++++++++++++++++++++++++++
drivers/nvdimm/nd.h | 13 +++++++++++++
drivers/nvdimm/pmem.c | 5 +++++
6 files changed, 75 insertions(+), 2 deletions(-)
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 912cb36b8435..9d72085a67c9 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -73,4 +73,18 @@ config ND_MAX_REGIONS
Leave the default of 64 if you are unsure.
+config ND_IOSTAT
+ bool "Enable iostat by default"
+ default n
+ ---help---
+ Persistent memory i/o has very low latency to the point
+ where the overhead to measure statistics can dramatically
+ impact the relative performance of the driver. Say y here
+ to trade off performance for statistics gathering that is
+ enabled by default. These statistics can always be
+ enabled/disabled at run time via the 'iostat' attribute of
+ the block device's queue in sysfs.
+
+ If unsure, say N
+
endif
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 9d609ef95266..8a65e5a500d8 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -168,8 +168,10 @@ static void nd_blk_make_request(struct request_queue *q, struct bio *bio)
struct bio_integrity_payload *bip;
struct nd_blk_device *blk_dev;
struct bvec_iter iter;
+ unsigned long start;
struct bio_vec bvec;
int err = 0, rw;
+ bool do_acct;
if (unlikely(bio_end_sector(bio) > get_capacity(disk))) {
err = -EIO;
@@ -191,6 +193,7 @@ static void nd_blk_make_request(struct request_queue *q, struct bio *bio)
blk_dev = disk->private_data;
rw = bio_data_dir(bio);
+ do_acct = nd_iostat_start(bio, &start);
bio_for_each_segment(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
@@ -202,9 +205,11 @@ static void nd_blk_make_request(struct request_queue *q, struct bio *bio)
"io error in %s sector %lld, len %d,\n",
(rw == READ) ? "READ" : "WRITE",
(unsigned long long) iter.bi_sector, len);
- goto out;
+ break;
}
}
+ if (do_acct)
+ nd_iostat_end(bio, start);
out:
bio_endio(bio, err);
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 83b798dd2e68..67484633c322 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1178,8 +1178,10 @@ static void btt_make_request(struct request_queue *q, struct bio *bio)
struct block_device *bdev = bio->bi_bdev;
struct btt *btt = q->queuedata;
struct bvec_iter iter;
+ unsigned long start;
struct bio_vec bvec;
int err = 0, rw;
+ bool do_acct;
if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) {
err = -EIO;
@@ -1199,6 +1201,7 @@ static void btt_make_request(struct request_queue *q, struct bio *bio)
goto out;
}
+ do_acct = nd_iostat_start(bio, &start);
bio_for_each_segment(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
@@ -1215,9 +1218,11 @@ static void btt_make_request(struct request_queue *q, struct bio *bio)
"io error in %s sector %lld, len %d,\n",
(rw == READ) ? "READ" : "WRITE",
(unsigned long long) iter.bi_sector, len);
- goto out;
+ break;
}
}
+ if (do_acct)
+ nd_iostat_end(bio, start);
out:
bio_endio(bio, err);
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index d27b13357873..99cf95af5f24 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -218,9 +218,40 @@ void nd_blk_queue_init(struct request_queue *q)
{
blk_queue_max_hw_sectors(q, UINT_MAX);
blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
+ if (IS_ENABLED(CONFIG_ND_IOSTAT))
+ queue_flag_set_unlocked(QUEUE_FLAG_IO_STAT, q);
}
EXPORT_SYMBOL(nd_blk_queue_init);
+void __nd_iostat_start(struct bio *bio, unsigned long *start)
+{
+ struct gendisk *disk = bio->bi_bdev->bd_disk;
+ const int rw = bio_data_dir(bio);
+ int cpu = part_stat_lock();
+
+ *start = jiffies;
+ part_round_stats(cpu, &disk->part0);
+ part_stat_inc(cpu, &disk->part0, ios[rw]);
+ part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio));
+ part_inc_in_flight(&disk->part0, rw);
+ part_stat_unlock();
+}
+EXPORT_SYMBOL(__nd_iostat_start);
+
+void nd_iostat_end(struct bio *bio, unsigned long start)
+{
+ struct gendisk *disk = bio->bi_bdev->bd_disk;
+ unsigned long duration = jiffies - start;
+ const int rw = bio_data_dir(bio);
+ int cpu = part_stat_lock();
+
+ part_stat_add(cpu, &disk->part0, ticks[rw], duration);
+ part_round_stats(cpu, &disk->part0);
+ part_dec_in_flight(&disk->part0, rw);
+ part_stat_unlock();
+}
+EXPORT_SYMBOL(nd_iostat_end);
+
static ssize_t commands_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 2f20d5dca028..3c4c8b6c64ec 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -13,6 +13,7 @@
#ifndef __ND_H__
#define __ND_H__
#include <linux/libnvdimm.h>
+#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/mutex.h>
@@ -172,5 +173,17 @@ struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
resource_size_t n);
int nd_blk_region_init(struct nd_region *nd_region);
void nd_blk_queue_init(struct request_queue *q);
+void __nd_iostat_start(struct bio *bio, unsigned long *start);
+static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
+{
+ struct gendisk *disk = bio->bi_bdev->bd_disk;
+
+ if (!blk_queue_io_stat(disk->queue))
+ return false;
+
+ __nd_iostat_start(bio, start);
+ return true;
+}
+void nd_iostat_end(struct bio *bio, unsigned long start);
resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk);
#endif /* __ND_H__ */
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 0337b00f5409..3fd854a78f09 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -59,6 +59,8 @@ static void pmem_do_bvec(struct pmem_device *pmem, struct page *page,
static void pmem_make_request(struct request_queue *q, struct bio *bio)
{
int err = 0;
+ bool do_acct;
+ unsigned long start;
struct bio_vec bvec;
struct bvec_iter iter;
struct block_device *bdev = bio->bi_bdev;
@@ -69,9 +71,12 @@ static void pmem_make_request(struct request_queue *q, struct bio *bio)
goto out;
}
+ do_acct = nd_iostat_start(bio, &start);
bio_for_each_segment(bvec, bio, iter)
pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len, bvec.bv_offset,
bio_data_dir(bio), iter.bi_sector);
+ if (do_acct)
+ nd_iostat_end(bio, start);
out:
bio_endio(bio, err);
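
For completeness, a minimal userspace sketch of the run-time knob
referenced in the Kconfig help above, i.e. the 'iostat' attribute of the
block device's queue in sysfs. The device name "pmem0" is only an
example and is not created by this patch:

	#include <stdio.h>

	int main(void)
	{
		/* per-queue sysfs attribute that toggles I/O accounting */
		const char *path = "/sys/block/pmem0/queue/iostat";
		FILE *f = fopen(path, "w");

		if (!f) {
			perror(path);
			return 1;
		}
		fputs("1\n", f);	/* write "0" to disable again */
		fclose(f);
		return 0;
	}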