From: "Javier González" <jg@lightnvm.io>
To: axboe@fb.com
Cc: linux-block@vger.kernel.org, linux-kernel@vger.kernel.org,
"Javier González" <javier@cnexlabs.com>
Subject: [PATCH 3/5] lightnvm: pblk: decouple read/erase mempools
Date: Thu, 14 Sep 2017 12:33:45 +0200
Message-ID: <1505385227-28706-4-git-send-email-javier@cnexlabs.com>
In-Reply-To: <1505385227-28706-1-git-send-email-javier@cnexlabs.com>
Since the read and erase paths offer different guarantees for in-flight
I/Os, split the shared mempool into separate read and erase mempools so
that the right min_nr can be set for each on creation.
Reported-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Javier González <javier@cnexlabs.com>
---
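Note (archive annotation, not part of the applied patch): the split
works because mempool_create_slab_pool(min_nr, cache) pre-allocates
min_nr objects per pool, and mempool_alloc() with GFP_KERNEL never
fails in process context, so each path can size its reserve to its own
worst-case in-flight count. A minimal sketch of the idea follows; the
demo_* names and the 96-byte object size are hypothetical placeholders,
not pblk's actual identifiers:

#include <linux/mempool.h>
#include <linux/slab.h>

static struct kmem_cache *demo_rq_cache;
static mempool_t *demo_r_rq_pool;	/* reads: one reserved rqd per LUN */
static mempool_t *demo_e_rq_pool;	/* erases: one reserved rqd per LUN */

static int demo_pools_init(int nr_luns)
{
	/* object size is a placeholder; pblk sizes this per request type */
	demo_rq_cache = kmem_cache_create("demo_rq", 96, 0, 0, NULL);
	if (!demo_rq_cache)
		return -ENOMEM;

	/* each pool reserves its own min_nr up front; decoupling the
	 * pools is what lets the two paths pick different values */
	demo_r_rq_pool = mempool_create_slab_pool(nr_luns, demo_rq_cache);
	demo_e_rq_pool = mempool_create_slab_pool(nr_luns, demo_rq_cache);
	if (!demo_r_rq_pool || !demo_e_rq_pool) {
		mempool_destroy(demo_r_rq_pool);	/* NULL-safe */
		mempool_destroy(demo_e_rq_pool);
		kmem_cache_destroy(demo_rq_cache);
		return -ENOMEM;
	}
	return 0;
}

An allocation on either path then reduces to
mempool_alloc(demo_r_rq_pool, GFP_KERNEL), which needs no NULL check
when called from process context.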
drivers/lightnvm/pblk-core.c | 12 ++++--------
drivers/lightnvm/pblk-init.c | 22 +++++++++++++++-------
drivers/lightnvm/pblk.h | 5 +++--
3 files changed, 22 insertions(+), 17 deletions(-)
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index b7a6b223b1a5..9e35ec7c1d9f 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -64,7 +64,7 @@ static void pblk_end_io_erase(struct nvm_rq *rqd)
struct pblk *pblk = rqd->private;
__pblk_end_io_erase(pblk, rqd);
- mempool_free(rqd, pblk->g_rq_pool);
+ mempool_free(rqd, pblk->e_rq_pool);
}
void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
@@ -161,13 +161,11 @@ struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw)
pool = pblk->w_rq_pool;
rq_size = pblk_w_rq_size;
} else {
- pool = pblk->g_rq_pool;
+ pool = pblk->r_rq_pool;
rq_size = pblk_g_rq_size;
}
rqd = mempool_alloc(pool, GFP_KERNEL);
- if (!rqd)
- return NULL;
memset(rqd, 0, rq_size);
return rqd;
@@ -180,7 +178,7 @@ void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw)
if (rw == WRITE)
pool = pblk->w_rq_pool;
else
- pool = pblk->g_rq_pool;
+ pool = pblk->r_rq_pool;
mempool_free(rqd, pool);
}
@@ -1479,9 +1477,7 @@ int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
struct nvm_rq *rqd;
int err;
- rqd = mempool_alloc(pblk->g_rq_pool, GFP_KERNEL);
- if (!rqd)
- return -ENOMEM;
+ rqd = mempool_alloc(pblk->e_rq_pool, GFP_KERNEL);
memset(rqd, 0, pblk_g_rq_size);
pblk_setup_e_rq(pblk, rqd, ppa);
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 0c65abb96dc6..5f8a9fd2f29a 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -261,15 +261,20 @@ static int pblk_core_init(struct pblk *pblk)
if (!pblk->rec_pool)
goto free_gen_ws_pool;
- pblk->g_rq_pool = mempool_create_slab_pool(PBLK_READ_REQ_POOL_SIZE,
+ pblk->r_rq_pool = mempool_create_slab_pool(geo->nr_luns,
pblk_g_rq_cache);
- if (!pblk->g_rq_pool)
+ if (!pblk->r_rq_pool)
goto free_rec_pool;
- pblk->w_rq_pool = mempool_create_slab_pool(geo->nr_luns * 2,
+ pblk->e_rq_pool = mempool_create_slab_pool(geo->nr_luns,
+ pblk_g_rq_cache);
+ if (!pblk->e_rq_pool)
+ goto free_r_rq_pool;
+
+ pblk->w_rq_pool = mempool_create_slab_pool(geo->nr_luns,
pblk_w_rq_cache);
if (!pblk->w_rq_pool)
- goto free_g_rq_pool;
+ goto free_e_rq_pool;
pblk->line_meta_pool =
mempool_create_slab_pool(PBLK_META_POOL_SIZE,
@@ -304,8 +309,10 @@ static int pblk_core_init(struct pblk *pblk)
mempool_destroy(pblk->line_meta_pool);
free_w_rq_pool:
mempool_destroy(pblk->w_rq_pool);
-free_g_rq_pool:
- mempool_destroy(pblk->g_rq_pool);
+free_e_rq_pool:
+ mempool_destroy(pblk->e_rq_pool);
+free_r_rq_pool:
+ mempool_destroy(pblk->r_rq_pool);
free_rec_pool:
mempool_destroy(pblk->rec_pool);
free_gen_ws_pool:
@@ -326,7 +333,8 @@ static void pblk_core_free(struct pblk *pblk)
mempool_destroy(pblk->page_bio_pool);
mempool_destroy(pblk->gen_ws_pool);
mempool_destroy(pblk->rec_pool);
- mempool_destroy(pblk->g_rq_pool);
+ mempool_destroy(pblk->r_rq_pool);
+ mempool_destroy(pblk->e_rq_pool);
mempool_destroy(pblk->w_rq_pool);
mempool_destroy(pblk->line_meta_pool);
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index dee8f66e6ce2..0120e0ade703 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -41,7 +41,6 @@
#define PBLK_MAX_REQ_ADDRS_PW (6)
#define PBLK_META_POOL_SIZE (128)
-#define PBLK_READ_REQ_POOL_SIZE (1024)
#define PBLK_NR_CLOSE_JOBS (4)
@@ -60,6 +59,7 @@
#define ERASE 2 /* READ = 0, WRITE = 1 */
+/* Static pool sizes */
#define PBLK_GEN_WS_POOL_SIZE (2)
enum {
@@ -623,8 +623,9 @@ struct pblk {
mempool_t *page_bio_pool;
mempool_t *gen_ws_pool;
mempool_t *rec_pool;
- mempool_t *g_rq_pool;
+ mempool_t *r_rq_pool;
mempool_t *w_rq_pool;
+ mempool_t *e_rq_pool;
mempool_t *line_meta_pool;
struct workqueue_struct *close_wq;
--
2.7.4