From: Christoph Hellwig <hch@lst.de>
To: shli@fb.com, neilb@suse.com
Cc: linux-raid@vger.kernel.org
Subject: [PATCH 1/2] raid5-cache: use a bio_set
Date: Wed, 2 Dec 2015 17:10:37 +0100
Message-ID: <1449072638-15409-2-git-send-email-hch@lst.de>
In-Reply-To: <1449072638-15409-1-git-send-email-hch@lst.de>
Allocating bios from a mempool-backed bio_set instead of with plain bio_kmalloc allows us to make guaranteed forward progress even under memory pressure.
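For reference, a minimal sketch of the bio_set lifecycle this patch adopts
(against the 2015-era block API; the example_* names are hypothetical and not
part of the patch).  The point is that a bio_set is backed by a mempool: a
blocking allocation may sleep until another bio is put back into the pool,
but it does not fail:

    #include <linux/bio.h>

    static struct bio_set *example_bs;

    static int example_init(void)
    {
            /* pre-reserve a pool so writers can always make progress */
            example_bs = bioset_create(R5L_POOL_SIZE, 0);
            if (!example_bs)
                    return -ENOMEM;
            return 0;
    }

    static struct bio *example_alloc(void)
    {
            /*
             * GFP_NOIO allows blocking, so with the mempool behind it
             * this waits for a free bio instead of returning NULL.
             */
            return bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, example_bs);
    }

    static void example_exit(void)
    {
            /* all bios must have been returned via bio_put() by now */
            bioset_free(example_bs);
    }

This is also why __GFP_NOFAIL can be dropped below: the mempool, not the page
allocator, now provides the forward-progress guarantee.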
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
drivers/md/raid5-cache.c | 16 +++++++++++++++-
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 668e973..ef59564 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -34,6 +34,12 @@
 #define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
 #define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
 
+/*
+ * We only need 2 bios per I/O unit to make progress, but ensure we
+ * have a few more available to not get too tight.
+ */
+#define R5L_POOL_SIZE	4
+
 struct r5l_log {
 	struct md_rdev *rdev;
 
@@ -70,6 +76,7 @@ struct r5l_log {
 	struct bio flush_bio;
 
 	struct kmem_cache *io_kc;
+	struct bio_set *bs;
 
 	struct md_thread *reclaim_thread;
 	unsigned long reclaim_target;	/* number of space that need to be
@@ -248,7 +255,7 @@ static void r5l_submit_current_io(struct r5l_log *log)
 
 static struct bio *r5l_bio_alloc(struct r5l_log *log)
 {
-	struct bio *bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES);
+	struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs);
 
 	bio->bi_rw = WRITE;
 	bio->bi_bdev = log->rdev->bdev;
@@ -1153,6 +1160,10 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
 	if (!log->io_kc)
 		goto io_kc;
 
+	log->bs = bioset_create(R5L_POOL_SIZE, 0);
+	if (!log->bs)
+		goto io_bs;
+
 	log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
 						 log->rdev->mddev, "reclaim");
 	if (!log->reclaim_thread)
@@ -1170,6 +1181,8 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
 error:
 	md_unregister_thread(&log->reclaim_thread);
 reclaim_thread:
+	bioset_free(log->bs);
+io_bs:
 	kmem_cache_destroy(log->io_kc);
 io_kc:
 	kfree(log);
@@ -1179,6 +1192,7 @@ io_kc:
 void r5l_exit_log(struct r5l_log *log)
 {
 	md_unregister_thread(&log->reclaim_thread);
+	bioset_free(log->bs);
 	kmem_cache_destroy(log->io_kc);
 	kfree(log);
 }
--
1.9.1