* [PATCH v2 1/2] lightnvm: specify target's logical address area
@ 2016-01-26 12:33 Wenwei Tao
2016-01-26 12:33 ` [PATCH v2 2/2] lightnvm: add non-continuous lun target creation support Wenwei Tao
2016-01-26 14:52 ` [PATCH v2 1/2] lightnvm: specify target's logical address area Matias Bjørling
0 siblings, 2 replies; 15+ messages in thread
From: Wenwei Tao @ 2016-01-26 12:33 UTC (permalink / raw)
To: mb; +Cc: linux-kernel, linux-block
We can create more than one target on a lightnvm
device by specifying its begin lun and end lun.
But only specifying the physical address area is not
enough; we also need to get the corresponding non-
intersecting logical address area division from
the backend device's logical address space.
Otherwise the targets on the device might use
the same logical addresses, causing incorrect
information in the device's l2p table.
Signed-off-by: Wenwei Tao <ww.tao0320@gmail.com>
---
Changes since v1:
- rename some variables
- add parentheses for clarity
- make gennvm_get_area return int, and add one more sector_t* parameter
to pass the begin sector of the corresponding target
- rebase to v4.5-rc1
drivers/lightnvm/core.c | 1 +
drivers/lightnvm/gennvm.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++
drivers/lightnvm/gennvm.h | 6 +++++
drivers/lightnvm/rrpc.c | 45 +++++++++++++++++++++++++++++++++---
drivers/lightnvm/rrpc.h | 1 +
include/linux/lightnvm.h | 8 +++++++
6 files changed, 117 insertions(+), 3 deletions(-)
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 33224cb..27a59e8 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -470,6 +470,7 @@ static int nvm_core_init(struct nvm_dev *dev)
dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
INIT_LIST_HEAD(&dev->online_targets);
mutex_init(&dev->mlock);
+ spin_lock_init(&dev->lock);
return 0;
}
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 7fb725b..34ea4ff 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -20,6 +20,60 @@
#include "gennvm.h"
+static int gennvm_get_area(struct nvm_dev *dev, sector_t *begin_sect,
+ sector_t size)
+{
+ struct gen_nvm *gn = dev->mp;
+ struct gennvm_area *area, *prev;
+ sector_t begin = 0;
+ int page_size = dev->sec_size * dev->sec_per_pg;
+ sector_t max_sectors = (page_size * dev->total_pages) >> 9;
+
+ if (size > max_sectors)
+ return -EINVAL;
+ area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
+ if (!area)
+ return -ENOMEM;
+
+ spin_lock(&dev->lock);
+ list_for_each_entry(prev, &gn->area_list, list) {
+ if (begin + size > prev->begin) {
+ begin = prev->end;
+ continue;
+ }
+ break;
+ }
+
+ if ((begin + size) > max_sectors) {
+ spin_unlock(&dev->lock);
+ kfree(area);
+ return -EINVAL;
+ }
+
+ area->begin = *begin_sect = begin;
+ area->end = begin + size;
+ list_add(&area->list, &prev->list);
+ spin_unlock(&dev->lock);
+ return 0;
+}
+
+static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
+{
+ struct gen_nvm *gn = dev->mp;
+ struct gennvm_area *area;
+
+ spin_lock(&dev->lock);
+ list_for_each_entry(area, &gn->area_list, list) {
+ if (area->begin == begin) {
+ list_del(&area->list);
+ spin_unlock(&dev->lock);
+ kfree(area);
+ return;
+ }
+ }
+ spin_unlock(&dev->lock);
+}
+
static void gennvm_blocks_free(struct nvm_dev *dev)
{
struct gen_nvm *gn = dev->mp;
@@ -230,6 +284,7 @@ static int gennvm_register(struct nvm_dev *dev)
gn->dev = dev;
gn->nr_luns = dev->nr_luns;
+ INIT_LIST_HEAD(&gn->area_list);
dev->mp = gn;
ret = gennvm_luns_init(dev, gn);
@@ -466,6 +521,10 @@ static struct nvmm_type gennvm = {
.get_lun = gennvm_get_lun,
.lun_info_print = gennvm_lun_info_print,
+
+ .get_area = gennvm_get_area,
+ .put_area = gennvm_put_area,
+
};
static int __init gennvm_module_init(void)
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
index 9c24b5b..04d7c23 100644
--- a/drivers/lightnvm/gennvm.h
+++ b/drivers/lightnvm/gennvm.h
@@ -39,8 +39,14 @@ struct gen_nvm {
int nr_luns;
struct gen_lun *luns;
+ struct list_head area_list;
};
+struct gennvm_area {
+ struct list_head list;
+ sector_t begin;
+ sector_t end; /* end is excluded */
+};
#define gennvm_for_each_lun(bm, lun, i) \
for ((i) = 0, lun = &(bm)->luns[0]; \
(i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index d8c7595..c8c27f9 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -1042,7 +1042,18 @@ static int rrpc_map_init(struct rrpc *rrpc)
{
struct nvm_dev *dev = rrpc->dev;
sector_t i;
- int ret;
+ u64 slba;
+ int ret, page_size;
+ int page_shfit, nr_pages;
+
+ page_size = dev->sec_per_pg * dev->sec_size;
+ page_shfit = ilog2(page_size);
+ nr_pages = rrpc->nr_luns *
+ dev->nr_planes *
+ dev->blks_per_lun *
+ dev->pgs_per_blk;
+ slba = rrpc->soffset >> (page_shfit - 9);
+
rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_pages);
if (!rrpc->trans_map)
@@ -1065,8 +1076,7 @@ static int rrpc_map_init(struct rrpc *rrpc)
return 0;
/* Bring up the mapping table from device */
- ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
- rrpc_l2p_update, rrpc);
+ ret = dev->ops->get_l2p_tbl(dev, slba, nr_pages, rrpc_l2p_update, rrpc);
if (ret) {
pr_err("nvm: rrpc: could not read L2P table.\n");
return -EINVAL;
@@ -1189,12 +1199,33 @@ err:
return -ENOMEM;
}
+static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
+{
+ struct nvm_dev *dev = rrpc->dev;
+ struct nvmm_type *mt = dev->mt;
+ sector_t size = rrpc->nr_luns *
+ dev->sec_per_lun *
+ dev->sec_size;
+
+ size >>= 9;
+ return mt->get_area(dev, begin, size);
+}
+
+static void rrpc_area_free(struct rrpc *rrpc)
+{
+ struct nvm_dev *dev = rrpc->dev;
+ struct nvmm_type *mt = dev->mt;
+
+ mt->put_area(dev, rrpc->soffset);
+}
+
static void rrpc_free(struct rrpc *rrpc)
{
rrpc_gc_free(rrpc);
rrpc_map_free(rrpc);
rrpc_core_free(rrpc);
rrpc_luns_free(rrpc);
+ rrpc_area_free(rrpc);
kfree(rrpc);
}
@@ -1315,6 +1346,7 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
struct request_queue *bqueue = dev->q;
struct request_queue *tqueue = tdisk->queue;
struct rrpc *rrpc;
+ sector_t soffset;
int ret;
if (!(dev->identity.dom & NVM_RSP_L2P)) {
@@ -1340,6 +1372,13 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
/* simple round-robin strategy */
atomic_set(&rrpc->next_lun, -1);
+ ret = rrpc_area_init(rrpc, &soffset);
+ if (ret < 0) {
+ pr_err("nvm: rrpc: could not initialize area\n");
+ return ERR_PTR(ret);
+ }
+ rrpc->soffset = soffset;
+
ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
if (ret) {
pr_err("nvm: rrpc: could not initialize luns\n");
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index ef13ac7..9380c68 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -97,6 +97,7 @@ struct rrpc {
struct nvm_dev *dev;
struct gendisk *disk;
+ sector_t soffset; /* logical sector offset */
u64 poffset; /* physical page offset */
int lun_offset;
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index d675011..18f1bb0 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -351,6 +351,7 @@ struct nvm_dev {
char name[DISK_NAME_LEN];
struct mutex mlock;
+ spinlock_t lock;
};
static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
@@ -463,6 +464,9 @@ typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
+typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
+typedef void (nvmm_put_area_fn)(struct nvm_dev *, sector_t);
+
struct nvmm_type {
const char *name;
unsigned int version[3];
@@ -487,6 +491,10 @@ struct nvmm_type {
/* Statistics */
nvmm_lun_info_print_fn *lun_info_print;
+
+ nvmm_get_area_fn *get_area;
+ nvmm_put_area_fn *put_area;
+
struct list_head list;
};
--
1.8.3.1
^ permalink raw reply related [flat|nested] 15+ messages in thread
* [PATCH v2 2/2] lightnvm: add non-continuous lun target creation support
2016-01-26 12:33 [PATCH v2 1/2] lightnvm: specify target's logical address area Wenwei Tao
@ 2016-01-26 12:33 ` Wenwei Tao
2016-01-27 9:44 ` Matias Bjørling
2016-01-26 14:52 ` [PATCH v2 1/2] lightnvm: specify target's logical address area Matias Bjørling
1 sibling, 1 reply; 15+ messages in thread
From: Wenwei Tao @ 2016-01-26 12:33 UTC (permalink / raw)
To: mb; +Cc: linux-kernel, linux-block
When creating a target, we specify the begin lunid and
the end lunid, and get the corresponding continuous
luns from the media manager. If one of those luns is not free,
we fail to create the target, even if the device's
total number of free luns is sufficient.
So add non-continuous lun target creation support,
so that we can improve the backend device's space utilization.
Signed-off-by: Wenwei Tao <ww.tao0320@gmail.com>
---
Changes since v1:
-use NVM_FIXED instead of NVM_C_FIXED in gennvm_get_lun
-add target creation flags check
-rebase to v4.5-rc1
drivers/lightnvm/core.c | 36 ++++---
drivers/lightnvm/gennvm.c | 42 ++++++++-
drivers/lightnvm/rrpc.c | 215 +++++++++++++++++++++++++++---------------
drivers/lightnvm/rrpc.h | 6 +-
include/linux/lightnvm.h | 24 ++++-
include/uapi/linux/lightnvm.h | 3 +
6 files changed, 229 insertions(+), 97 deletions(-)
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 27a59e8..7de5855 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -28,7 +28,6 @@
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>
-#include <uapi/linux/lightnvm.h>
static LIST_HEAD(nvm_targets);
static LIST_HEAD(nvm_mgrs);
@@ -468,6 +467,11 @@ static int nvm_core_init(struct nvm_dev *dev)
dev->luns_per_chnl *
dev->nr_chnls;
dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
+ dev->lun_map = kcalloc(BITS_TO_LONGS(dev->nr_luns),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!dev->lun_map)
+ return -ENOMEM;
+
INIT_LIST_HEAD(&dev->online_targets);
mutex_init(&dev->mlock);
spin_lock_init(&dev->lock);
@@ -610,6 +614,7 @@ void nvm_unregister(char *disk_name)
up_write(&nvm_lock);
nvm_exit(dev);
+ kfree(dev->lun_map);
kfree(dev);
}
EXPORT_SYMBOL(nvm_unregister);
@@ -626,6 +631,7 @@ static int nvm_create_target(struct nvm_dev *dev,
struct gendisk *tdisk;
struct nvm_tgt_type *tt;
struct nvm_target *t;
+ unsigned long flags;
void *targetdata;
if (!dev->mt) {
@@ -670,7 +676,8 @@ static int nvm_create_target(struct nvm_dev *dev,
tdisk->fops = &nvm_fops;
tdisk->queue = tqueue;
- targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
+ flags = calc_nvm_create_bits(create->flags);
+ targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end, flags);
if (IS_ERR(targetdata))
goto err_init;
@@ -736,6 +743,15 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
pr_err("nvm: config type not valid\n");
return -EINVAL;
}
+
+ if (create->flags) {
+ if (!(create->flags & NVM_C_FLAGS) ||
+ (create->flags & ~NVM_C_FLAGS)) {
+ pr_err("nvm: create flags not supported\n");
+ return -EINVAL;
+ }
+ }
+
s = &create->conf.s;
if (s->lun_begin > s->lun_end || s->lun_end > dev->nr_luns) {
@@ -824,15 +840,14 @@ static int nvm_configure_create(const char *val)
char opcode;
int lun_begin, lun_end, ret;
- ret = sscanf(val, "%c %256s %256s %48s %u:%u", &opcode, create.dev,
- create.tgtname, create.tgttype,
- &lun_begin, &lun_end);
- if (ret != 6) {
- pr_err("nvm: invalid command. Use \"opcode device name tgttype lun_begin:lun_end\".\n");
+ ret = sscanf(val, "%c %256s %256s %48s %u:%u %u", &opcode, create.dev,
+ create.tgtname, create.tgttype,
+ &lun_begin, &lun_end, &create.flags);
+ if (ret != 7) {
+ pr_err("nvm: invalid command. Use \"opcode device name tgttype lun_begin:lun_end flags\".\n");
return -EINVAL;
}
- create.flags = 0;
create.conf.type = NVM_CONFIG_TYPE_SIMPLE;
create.conf.s.lun_begin = lun_begin;
create.conf.s.lun_end = lun_end;
@@ -1002,11 +1017,6 @@ static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
create.tgtname[DISK_NAME_LEN - 1] = '\0';
- if (create.flags != 0) {
- pr_err("nvm: no flags supported\n");
- return -EINVAL;
- }
-
return __nvm_configure_create(&create);
}
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 34ea4ff..0400390 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -185,6 +185,9 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
lun_id = div_u64(pba, dev->sec_per_lun);
lun = &gn->luns[lun_id];
+ if (!test_bit(lun_id, dev->lun_map))
+ __set_bit(lun_id, dev->lun_map);
+
/* Calculate block offset into lun */
pba = pba - (dev->sec_per_lun * lun_id);
blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];
@@ -475,11 +478,45 @@ static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
return nvm_erase_ppa(dev, &addr, 1);
}
-static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
+static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid,
+ unsigned long flags)
{
struct gen_nvm *gn = dev->mp;
+ unsigned long *lun_map = dev->lun_map;
+ struct nvm_lun *lun = NULL;
+ int id;
+
+ if (WARN_ON(lunid >= dev->nr_luns))
+ return NULL;
+
+ if (flags & NVM_NOALLOC)
+ return &gn->luns[lunid].vlun;
+
+ spin_lock(&dev->lock);
+ if (flags & NVM_FIXED) {
+ if (test_and_set_bit(lunid, lun_map)) {
+ pr_err("gennvm: lun %u is inuse\n", lunid);
+ goto out;
+ } else {
+ lun = &gn->luns[lunid].vlun;
+ goto out;
+ }
+ }
+ id = find_next_zero_bit(lun_map, dev->nr_luns, 0);
+ if (id < dev->nr_luns) {
+ __set_bit(id, lun_map);
+ lun = &gn->luns[id].vlun;
+ } else
+ pr_err("gennvm: dev %s has no free luns\n", dev->name);
+
+out:
+ spin_unlock(&dev->lock);
+ return lun;
+}
- return &gn->luns[lunid].vlun;
+static inline void gennvm_put_lun(struct nvm_dev *dev, int lunid)
+{
+ WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
}
static void gennvm_lun_info_print(struct nvm_dev *dev)
@@ -520,6 +557,7 @@ static struct nvmm_type gennvm = {
.erase_blk = gennvm_erase_blk,
.get_lun = gennvm_get_lun,
+ .put_lun = gennvm_put_lun,
.lun_info_print = gennvm_lun_info_print,
.get_area = gennvm_get_area,
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index c8c27f9..59bca9c 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -23,28 +23,35 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
struct nvm_rq *rqd, unsigned long flags);
#define rrpc_for_each_lun(rrpc, rlun, i) \
- for ((i) = 0, rlun = &(rrpc)->luns[0]; \
- (i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
+ for ((i) = 0, rlun = &(rrpc)->luns[0]; \
+ (i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
+
+static inline u64 lun_poffset(struct nvm_lun *lun, struct nvm_dev *dev)
+{
+ return lun->id * dev->sec_per_lun;
+}
static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
{
struct rrpc_block *rblk = a->rblk;
- unsigned int pg_offset;
+ struct rrpc_lun *rlun = rblk->rlun;
+ u64 pg_offset;
- lockdep_assert_held(&rrpc->rev_lock);
+ lockdep_assert_held(&rlun->rev_lock);
if (a->addr == ADDR_EMPTY || !rblk)
return;
spin_lock(&rblk->lock);
- div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, &pg_offset);
+ div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, (u32 *)&pg_offset);
WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
rblk->nr_invalid_pages++;
spin_unlock(&rblk->lock);
- rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
+ pg_offset = lun_poffset(rlun->parent, rrpc->dev);
+ rlun->rev_trans_map[a->addr - pg_offset].addr = ADDR_EMPTY;
}
static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
@@ -52,14 +59,15 @@ static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
{
sector_t i;
- spin_lock(&rrpc->rev_lock);
for (i = slba; i < slba + len; i++) {
struct rrpc_addr *gp = &rrpc->trans_map[i];
+ struct rrpc_lun *rlun = gp->rblk->rlun;
+ spin_lock(&rlun->rev_lock);
rrpc_page_invalidate(rrpc, gp);
+ spin_unlock(&rlun->rev_lock);
gp->rblk = NULL;
}
- spin_unlock(&rrpc->rev_lock);
}
static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
@@ -281,13 +289,14 @@ static void rrpc_end_sync_bio(struct bio *bio)
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
{
struct request_queue *q = rrpc->dev->q;
+ struct rrpc_lun *rlun = rblk->rlun;
struct rrpc_rev_addr *rev;
struct nvm_rq *rqd;
struct bio *bio;
struct page *page;
int slot;
int nr_pgs_per_blk = rrpc->dev->pgs_per_blk;
- u64 phys_addr;
+ u64 phys_addr, poffset;
DECLARE_COMPLETION_ONSTACK(wait);
if (bitmap_full(rblk->invalid_pages, nr_pgs_per_blk))
@@ -303,6 +312,7 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
if (!page)
return -ENOMEM;
+ poffset = lun_poffset(rlun->parent, rrpc->dev);
while ((slot = find_first_zero_bit(rblk->invalid_pages,
nr_pgs_per_blk)) < nr_pgs_per_blk) {
@@ -310,23 +320,23 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
phys_addr = (rblk->parent->id * nr_pgs_per_blk) + slot;
try:
- spin_lock(&rrpc->rev_lock);
+ spin_lock(&rlun->rev_lock);
/* Get logical address from physical to logical table */
- rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
+ rev = &rlun->rev_trans_map[phys_addr - poffset];
/* already updated by previous regular write */
if (rev->addr == ADDR_EMPTY) {
- spin_unlock(&rrpc->rev_lock);
+ spin_unlock(&rlun->rev_lock);
continue;
}
rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
if (IS_ERR_OR_NULL(rqd)) {
- spin_unlock(&rrpc->rev_lock);
+ spin_unlock(&rlun->rev_lock);
schedule();
goto try;
}
- spin_unlock(&rrpc->rev_lock);
+ spin_unlock(&rlun->rev_lock);
/* Perform read to do GC */
bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
@@ -395,7 +405,7 @@ static void rrpc_block_gc(struct work_struct *work)
struct rrpc_block *rblk = gcb->rblk;
struct nvm_dev *dev = rrpc->dev;
struct nvm_lun *lun = rblk->parent->lun;
- struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
+ struct rrpc_lun *rlun = lun->private;
mempool_free(gcb, rrpc->gcb_pool);
pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
@@ -497,7 +507,7 @@ static void rrpc_gc_queue(struct work_struct *work)
struct rrpc *rrpc = gcb->rrpc;
struct rrpc_block *rblk = gcb->rblk;
struct nvm_lun *lun = rblk->parent->lun;
- struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
+ struct rrpc_lun *rlun = lun->private;
spin_lock(&rlun->lock);
list_add_tail(&rblk->prio, &rlun->prio_list);
@@ -540,22 +550,24 @@ static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
struct rrpc_block *rblk, u64 paddr)
{
+ struct rrpc_lun *rlun = rblk->rlun;
struct rrpc_addr *gp;
struct rrpc_rev_addr *rev;
+ u64 poffset = lun_poffset(rlun->parent, rrpc->dev);
BUG_ON(laddr >= rrpc->nr_pages);
gp = &rrpc->trans_map[laddr];
- spin_lock(&rrpc->rev_lock);
+ spin_lock(&rlun->rev_lock);
if (gp->rblk)
rrpc_page_invalidate(rrpc, gp);
gp->addr = paddr;
gp->rblk = rblk;
- rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
+ rev = &rlun->rev_trans_map[gp->addr - poffset];
rev->addr = laddr;
- spin_unlock(&rrpc->rev_lock);
+ spin_unlock(&rlun->rev_lock);
return gp;
}
@@ -956,8 +968,6 @@ static void rrpc_requeue(struct work_struct *work)
static void rrpc_gc_free(struct rrpc *rrpc)
{
- struct rrpc_lun *rlun;
- int i;
if (rrpc->krqd_wq)
destroy_workqueue(rrpc->krqd_wq);
@@ -965,16 +975,6 @@ static void rrpc_gc_free(struct rrpc *rrpc)
if (rrpc->kgc_wq)
destroy_workqueue(rrpc->kgc_wq);
- if (!rrpc->luns)
- return;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
-
- if (!rlun->blocks)
- break;
- vfree(rlun->blocks);
- }
}
static int rrpc_gc_init(struct rrpc *rrpc)
@@ -995,7 +995,6 @@ static int rrpc_gc_init(struct rrpc *rrpc)
static void rrpc_map_free(struct rrpc *rrpc)
{
- vfree(rrpc->rev_trans_map);
vfree(rrpc->trans_map);
}
@@ -1003,19 +1002,28 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
{
struct rrpc *rrpc = (struct rrpc *)private;
struct nvm_dev *dev = rrpc->dev;
- struct rrpc_addr *addr = rrpc->trans_map + slba;
- struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
+ struct rrpc_addr *addr;
+ struct rrpc_rev_addr *raddr;
sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
- u64 elba = slba + nlb;
- u64 i;
+ int page_size = dev->sec_per_pg * dev->sec_size;
+ u64 elba, i;
+
+ elba = slba + nlb;
if (unlikely(elba > dev->total_pages)) {
pr_err("nvm: L2P data from device is out of bounds!\n");
return -EINVAL;
}
+ slba -= rrpc->soffset >> (ilog2(page_size) - 9);
+ addr = rrpc->trans_map + slba;
for (i = 0; i < nlb; i++) {
+ struct rrpc_lun *rlun;
+ struct nvm_lun *lun;
u64 pba = le64_to_cpu(entries[i]);
+ u64 poffset;
+ int lunid;
+
/* LNVM treats address-spaces as silos, LBA and PBA are
* equally large and zero-indexed.
*/
@@ -1031,8 +1039,15 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
if (!pba)
continue;
+ lunid = div_u64(pba, dev->sec_per_lun);
+ lun = dev->mt->get_lun(dev, lunid, NVM_NOALLOC);
+ if (unlikely(!lun))
+ return -EINVAL;
+ rlun = lun->private;
+ raddr = rlun->rev_trans_map;
+ poffset = lun_poffset(lun, dev);
addr[i].addr = pba;
- raddr[pba].addr = slba + i;
+ raddr[pba - poffset].addr = slba + i;
}
return 0;
@@ -1059,17 +1074,11 @@ static int rrpc_map_init(struct rrpc *rrpc)
if (!rrpc->trans_map)
return -ENOMEM;
- rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
- * rrpc->nr_pages);
- if (!rrpc->rev_trans_map)
- return -ENOMEM;
for (i = 0; i < rrpc->nr_pages; i++) {
struct rrpc_addr *p = &rrpc->trans_map[i];
- struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];
p->addr = ADDR_EMPTY;
- r->addr = ADDR_EMPTY;
}
if (!dev->ops->get_l2p_tbl)
@@ -1140,21 +1149,85 @@ static void rrpc_core_free(struct rrpc *rrpc)
static void rrpc_luns_free(struct rrpc *rrpc)
{
+ struct nvm_dev *dev = rrpc->dev;
+ struct rrpc_lun *rlun;
+ struct nvm_lun *lun;
+ int i;
+
+ if (!rrpc->luns)
+ return;
+
+ for (i = 0; i < rrpc->nr_luns; i++) {
+ rlun = &rrpc->luns[i];
+ if (!rlun)
+ break;
+ lun = rlun->parent;
+ dev->mt->put_lun(dev, lun->id);
+ vfree(rlun->rev_trans_map);
+ vfree(rlun->blocks);
+ }
kfree(rrpc->luns);
+ rrpc->luns = NULL;
+
+}
+
+static int rrpc_lun_init(struct rrpc *rrpc, struct rrpc_lun *rlun,
+ struct nvm_lun *lun)
+{
+ struct nvm_dev *dev = rrpc->dev;
+ int i;
+
+ rlun->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr) *
+ dev->sec_per_lun);
+ if (!rlun->rev_trans_map)
+ return -ENOMEM;
+
+ for (i = 0; i < dev->sec_per_lun; i++) {
+ struct rrpc_rev_addr *r = &rlun->rev_trans_map[i];
+
+ r->addr = ADDR_EMPTY;
+ }
+ rlun->blocks = vzalloc(sizeof(struct rrpc_block) * dev->blks_per_lun);
+ if (!rlun->blocks) {
+ vfree(rlun->rev_trans_map);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < dev->blks_per_lun; i++) {
+ struct rrpc_block *rblk = &rlun->blocks[i];
+ struct nvm_block *blk = &lun->blocks[i];
+
+ rblk->parent = blk;
+ rblk->rlun = rlun;
+ INIT_LIST_HEAD(&rblk->prio);
+ spin_lock_init(&rblk->lock);
+ }
+
+ rlun->rrpc = rrpc;
+ rlun->parent = lun;
+ lun->private = rlun;
+ INIT_LIST_HEAD(&rlun->prio_list);
+ INIT_LIST_HEAD(&rlun->open_list);
+ INIT_LIST_HEAD(&rlun->closed_list);
+ INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
+ spin_lock_init(&rlun->lock);
+ spin_lock_init(&rlun->rev_lock);
+
+ return 0;
}
-static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
+static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end,
+ unsigned long flags)
{
struct nvm_dev *dev = rrpc->dev;
struct rrpc_lun *rlun;
- int i, j;
+ int i, ret;
if (dev->pgs_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
pr_err("rrpc: number of pages per block too high.");
return -EINVAL;
}
- spin_lock_init(&rrpc->rev_lock);
rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
GFP_KERNEL);
@@ -1163,40 +1236,29 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
/* 1:1 mapping */
for (i = 0; i < rrpc->nr_luns; i++) {
- struct nvm_lun *lun = dev->mt->get_lun(dev, lun_begin + i);
+ struct nvm_lun *lun = dev->mt->get_lun(dev,
+ lun_begin + i, flags);
- rlun = &rrpc->luns[i];
- rlun->rrpc = rrpc;
- rlun->parent = lun;
- INIT_LIST_HEAD(&rlun->prio_list);
- INIT_LIST_HEAD(&rlun->open_list);
- INIT_LIST_HEAD(&rlun->closed_list);
+ if (!lun) {
+ ret = -EINVAL;
+ goto err;
+ }
- INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
- spin_lock_init(&rlun->lock);
+ rlun = &rrpc->luns[i];
+ ret = rrpc_lun_init(rrpc, rlun, lun);
+ if (!ret)
+ goto err;
rrpc->total_blocks += dev->blks_per_lun;
rrpc->nr_pages += dev->sec_per_lun;
- rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
- rrpc->dev->blks_per_lun);
- if (!rlun->blocks)
- goto err;
-
- for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
- struct rrpc_block *rblk = &rlun->blocks[j];
- struct nvm_block *blk = &lun->blocks[j];
-
- rblk->parent = blk;
- rblk->rlun = rlun;
- INIT_LIST_HEAD(&rblk->prio);
- spin_lock_init(&rblk->lock);
- }
}
return 0;
err:
- return -ENOMEM;
+ rrpc_luns_free(rrpc);
+ return ret;
+
}
static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
@@ -1269,14 +1331,16 @@ static sector_t rrpc_capacity(void *private)
static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
{
struct nvm_dev *dev = rrpc->dev;
+ struct rrpc_lun *rlun = rblk->rlun;
int offset;
struct rrpc_addr *laddr;
- u64 paddr, pladdr;
+ u64 paddr, pladdr, poffset;
+ poffset = lun_poffset(rlun->parent, dev);
for (offset = 0; offset < dev->pgs_per_blk; offset++) {
paddr = block_to_addr(rrpc, rblk) + offset;
- pladdr = rrpc->rev_trans_map[paddr].addr;
+ pladdr = rlun->rev_trans_map[paddr - poffset].addr;
if (pladdr == ADDR_EMPTY)
continue;
@@ -1341,7 +1405,7 @@ err:
static struct nvm_tgt_type tt_rrpc;
static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
- int lun_begin, int lun_end)
+ int lun_begin, int lun_end, unsigned long flags)
{
struct request_queue *bqueue = dev->q;
struct request_queue *tqueue = tdisk->queue;
@@ -1379,15 +1443,12 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
}
rrpc->soffset = soffset;
- ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
+ ret = rrpc_luns_init(rrpc, lun_begin, lun_end, flags);
if (ret) {
pr_err("nvm: rrpc: could not initialize luns\n");
goto err;
}
- rrpc->poffset = dev->sec_per_lun * lun_begin;
- rrpc->lun_offset = lun_begin;
-
ret = rrpc_core_init(rrpc);
if (ret) {
pr_err("nvm: rrpc: could not initialize core\n");
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index 9380c68..4d756d8 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -86,6 +86,9 @@ struct rrpc_lun {
*/
struct work_struct ws_gc;
+ /* store a reverse map for garbage collection */
+ struct rrpc_rev_addr *rev_trans_map;
+ spinlock_t rev_lock;
spinlock_t lock;
};
@@ -124,9 +127,6 @@ struct rrpc {
* addresses are used when writing to the disk block device.
*/
struct rrpc_addr *trans_map;
- /* also store a reverse map for garbage collection */
- struct rrpc_rev_addr *rev_trans_map;
- spinlock_t rev_lock;
struct rrpc_inflight inflights;
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 18f1bb0..88a9d2b 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -212,6 +212,20 @@ struct nvm_tgt_instance {
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0
+#define NVM_FIXED 0X0001
+#define NVM_NOALLOC 0X0002
+
+/* These are stolen from mman.h */
+#define _calc_nvm_trans(x, bit1, bit2) \
+ ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
+ : ((x) & (bit1)) / ((bit1) / (bit2)))
+
+static inline unsigned long
+calc_nvm_create_bits(__u32 c_flags)
+{
+ return _calc_nvm_trans(c_flags, NVM_C_FIXED, NVM_FIXED);
+}
+
struct nvm_rq;
typedef void (nvm_end_io_fn)(struct nvm_rq *);
@@ -271,6 +285,7 @@ struct nvm_lun {
spinlock_t lock;
struct nvm_block *blocks;
+ void *private;
};
enum {
@@ -342,6 +357,8 @@ struct nvm_dev {
int nr_luns;
unsigned max_pages_per_blk;
+ unsigned long *lun_map;
+
void *ppalist_pool;
struct nvm_id identity;
@@ -424,7 +441,8 @@ static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg)
typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
-typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int);
+typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int,
+ unsigned long);
typedef void (nvm_tgt_exit_fn)(void *);
struct nvm_tgt_type {
@@ -461,7 +479,8 @@ typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
unsigned long);
-typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
+typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int, unsigned long);
+typedef void (nvmm_put_lun_fn)(struct nvm_dev *, int);
typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
@@ -488,6 +507,7 @@ struct nvmm_type {
/* Configuration management */
nvmm_get_lun_fn *get_lun;
+ nvmm_put_lun_fn *put_lun;
/* Statistics */
nvmm_lun_info_print_fn *lun_info_print;
diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h
index 774a431..700f8b2 100644
--- a/include/uapi/linux/lightnvm.h
+++ b/include/uapi/linux/lightnvm.h
@@ -37,6 +37,9 @@
#define NVM_CTRL_FILE "/dev/lightnvm/control"
+#define NVM_C_FIXED 0X0001 /* Interpret lun exactly */
+#define NVM_C_FLAGS NVM_C_FIXED
+
struct nvm_ioctl_info_tgt {
__u32 version[3];
__u32 reserved;
--
1.8.3.1
^ permalink raw reply related [flat|nested] 15+ messages in thread
* Re: [PATCH v2 1/2] lightnvm: specify target's logical address area
2016-01-26 12:33 [PATCH v2 1/2] lightnvm: specify target's logical address area Wenwei Tao
2016-01-26 12:33 ` [PATCH v2 2/2] lightnvm: add non-continuous lun target creation support Wenwei Tao
@ 2016-01-26 14:52 ` Matias Bjørling
2016-01-27 2:21 ` Wenwei Tao
1 sibling, 1 reply; 15+ messages in thread
From: Matias Bjørling @ 2016-01-26 14:52 UTC (permalink / raw)
To: Wenwei Tao; +Cc: linux-kernel, linux-block
On 01/26/2016 01:33 PM, Wenwei Tao wrote:
> We can create more than one target on a lightnvm
> device by specifying its begin lun and end lun.
>
> But only specifying the physical address area is not
> enough; we need to get the corresponding non-
> intersecting logical address area division from
> the backend device's logical address space.
> Otherwise the targets on the device might use
> the same logical addresses, causing incorrect
> information in the device's l2p table.
>
> Signed-off-by: Wenwei Tao <ww.tao0320@gmail.com>
> ---
> Changes since v1:
> - rename some variables
> - add parentheses for clarity
> - make gennvm_get_area return int, and add one more sector_t* parameter
> to pass the begin sector of the corresponding target
> - rebase to v4.5-rc1
>
> drivers/lightnvm/core.c | 1 +
> drivers/lightnvm/gennvm.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++
> drivers/lightnvm/gennvm.h | 6 +++++
> drivers/lightnvm/rrpc.c | 45 +++++++++++++++++++++++++++++++++---
> drivers/lightnvm/rrpc.h | 1 +
> include/linux/lightnvm.h | 8 +++++++
> 6 files changed, 117 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
> index 33224cb..27a59e8 100644
> --- a/drivers/lightnvm/core.c
> +++ b/drivers/lightnvm/core.c
> @@ -470,6 +470,7 @@ static int nvm_core_init(struct nvm_dev *dev)
> dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
> INIT_LIST_HEAD(&dev->online_targets);
> mutex_init(&dev->mlock);
> + spin_lock_init(&dev->lock);
>
> return 0;
> }
> diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
> index 7fb725b..34ea4ff 100644
> --- a/drivers/lightnvm/gennvm.c
> +++ b/drivers/lightnvm/gennvm.c
> @@ -20,6 +20,60 @@
>
> #include "gennvm.h"
>
> +static int gennvm_get_area(struct nvm_dev *dev, sector_t *begin_sect,
> + sector_t size)
> +{
> + struct gen_nvm *gn = dev->mp;
> + struct gennvm_area *area, *prev;
> + sector_t begin = 0;
> + int page_size = dev->sec_size * dev->sec_per_pg;
> + sector_t max_sectors = (page_size * dev->total_pages) >> 9;
> +
> + if (size > max_sectors)
> + return -EINVAL;
> + area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
> + if (!area)
> + return -ENOMEM;
> +
> + spin_lock(&dev->lock);
> + list_for_each_entry(prev, &gn->area_list, list) {
> + if (begin + size > prev->begin) {
> + begin = prev->end;
> + continue;
> + }
> + break;
> + }
> +
> + if ((begin + size) > max_sectors) {
> + spin_unlock(&dev->lock);
> + kfree(area);
> + return -EINVAL;
> + }
> +
> + area->begin = *begin_sect = begin;
> + area->end = begin + size;
> + list_add(&area->list, &prev->list);
> + spin_unlock(&dev->lock);
> + return 0;
> +}
> +
> +static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
> +{
> + struct gen_nvm *gn = dev->mp;
> + struct gennvm_area *area;
> +
> + spin_lock(&dev->lock);
> + list_for_each_entry(area, &gn->area_list, list) {
> + if (area->begin == begin) {
> + list_del(&area->list);
> + spin_unlock(&dev->lock);
> + kfree(area);
> + return;
> + }
> + }
> + spin_unlock(&dev->lock);
> +}
> +
> static void gennvm_blocks_free(struct nvm_dev *dev)
> {
> struct gen_nvm *gn = dev->mp;
> @@ -230,6 +284,7 @@ static int gennvm_register(struct nvm_dev *dev)
>
> gn->dev = dev;
> gn->nr_luns = dev->nr_luns;
> + INIT_LIST_HEAD(&gn->area_list);
> dev->mp = gn;
>
> ret = gennvm_luns_init(dev, gn);
> @@ -466,6 +521,10 @@ static struct nvmm_type gennvm = {
>
> .get_lun = gennvm_get_lun,
> .lun_info_print = gennvm_lun_info_print,
> +
> + .get_area = gennvm_get_area,
> + .put_area = gennvm_put_area,
> +
> };
>
> static int __init gennvm_module_init(void)
> diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
> index 9c24b5b..04d7c23 100644
> --- a/drivers/lightnvm/gennvm.h
> +++ b/drivers/lightnvm/gennvm.h
> @@ -39,8 +39,14 @@ struct gen_nvm {
>
> int nr_luns;
> struct gen_lun *luns;
> + struct list_head area_list;
> };
>
> +struct gennvm_area {
> + struct list_head list;
> + sector_t begin;
> + sector_t end; /* end is excluded */
> +};
> #define gennvm_for_each_lun(bm, lun, i) \
> for ((i) = 0, lun = &(bm)->luns[0]; \
> (i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
> diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
> index d8c7595..c8c27f9 100644
> --- a/drivers/lightnvm/rrpc.c
> +++ b/drivers/lightnvm/rrpc.c
> @@ -1042,7 +1042,18 @@ static int rrpc_map_init(struct rrpc *rrpc)
> {
> struct nvm_dev *dev = rrpc->dev;
> sector_t i;
> - int ret;
> + u64 slba;
> + int ret, page_size;
> + int page_shfit, nr_pages;
Did you mean page_shift here?
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH v2 1/2] lightnvm: specify target's logical address area
2016-01-26 14:52 ` [PATCH v2 1/2] lightnvm: specify target's logical address area Matias Bjørling
@ 2016-01-27 2:21 ` Wenwei Tao
2016-01-27 5:52 ` Matias Bjørling
0 siblings, 1 reply; 15+ messages in thread
From: Wenwei Tao @ 2016-01-27 2:21 UTC (permalink / raw)
To: Matias Bjørling; +Cc: linux-kernel, linux-block
Yes, it's a spelling mistake; I will correct it in the next version.
2016-01-26 22:52 GMT+08:00 Matias Bjørling <mb@lightnvm.io>:
> On 01/26/2016 01:33 PM, Wenwei Tao wrote:
>> We can create more than one target on a lightnvm
>> device by specifying its begin lun and end lun.
>>
>> But only specifying the physical address area is not
>> enough; we need to get the corresponding non-
>> intersecting logical address area division from
>> the backend device's logical address space.
>> Otherwise the targets on the device might use
>> the same logical addresses, causing incorrect
>> information in the device's l2p table.
>>
>> Signed-off-by: Wenwei Tao <ww.tao0320@gmail.com>
>> ---
>> Changes since v1:
>> - rename some variables
>> - add parentheses for clarity
>> - make gennvm_get_area return int, and add one more sector_t* parameter
>> to pass the begin sector of the corresponding target
>> - rebase to v4.5-rc1
>>
>> drivers/lightnvm/core.c | 1 +
>> drivers/lightnvm/gennvm.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++
>> drivers/lightnvm/gennvm.h | 6 +++++
>> drivers/lightnvm/rrpc.c | 45 +++++++++++++++++++++++++++++++++---
>> drivers/lightnvm/rrpc.h | 1 +
>> include/linux/lightnvm.h | 8 +++++++
>> 6 files changed, 117 insertions(+), 3 deletions(-)
>>
>> diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
>> index 33224cb..27a59e8 100644
>> --- a/drivers/lightnvm/core.c
>> +++ b/drivers/lightnvm/core.c
>> @@ -470,6 +470,7 @@ static int nvm_core_init(struct nvm_dev *dev)
>> dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
>> INIT_LIST_HEAD(&dev->online_targets);
>> mutex_init(&dev->mlock);
>> + spin_lock_init(&dev->lock);
>>
>> return 0;
>> }
>> diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
>> index 7fb725b..34ea4ff 100644
>> --- a/drivers/lightnvm/gennvm.c
>> +++ b/drivers/lightnvm/gennvm.c
>> @@ -20,6 +20,60 @@
>>
>> #include "gennvm.h"
>>
>> +static int gennvm_get_area(struct nvm_dev *dev, sector_t *begin_sect,
>> + sector_t size)
>> +{
>> + struct gen_nvm *gn = dev->mp;
>> + struct gennvm_area *area, *prev;
>> + sector_t begin = 0;
>> + int page_size = dev->sec_size * dev->sec_per_pg;
>> + sector_t max_sectors = (page_size * dev->total_pages) >> 9;
>> +
>> + if (size > max_sectors)
>> + return -EINVAL;
>> + area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
>> + if (!area)
>> + return -ENOMEM;
>> +
>> + spin_lock(&dev->lock);
>> + list_for_each_entry(prev, &gn->area_list, list) {
>> + if (begin + size > prev->begin) {
>> + begin = prev->end;
>> + continue;
>> + }
>> + break;
>> + }
>> +
>> + if ((begin + size) > max_sectors) {
>> + spin_unlock(&dev->lock);
>> + kfree(area);
>> + return -EINVAL;
>> + }
>> +
>> + area->begin = *begin_sect = begin;
>> + area->end = begin + size;
>> + list_add(&area->list, &prev->list);
>> + spin_unlock(&dev->lock);
>> + return 0;
>> +}
>> +
>> +static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
>> +{
>> + struct gen_nvm *gn = dev->mp;
>> + struct gennvm_area *area;
>> +
>> + spin_lock(&dev->lock);
>> + list_for_each_entry(area, &gn->area_list, list) {
>> + if (area->begin == begin) {
>> + list_del(&area->list);
>> + spin_unlock(&dev->lock);
>> + kfree(area);
>> + return;
>> + }
>> + }
>> + spin_unlock(&dev->lock);
>> +}
>> +
>> static void gennvm_blocks_free(struct nvm_dev *dev)
>> {
>> struct gen_nvm *gn = dev->mp;
>> @@ -230,6 +284,7 @@ static int gennvm_register(struct nvm_dev *dev)
>>
>> gn->dev = dev;
>> gn->nr_luns = dev->nr_luns;
>> + INIT_LIST_HEAD(&gn->area_list);
>> dev->mp = gn;
>>
>> ret = gennvm_luns_init(dev, gn);
>> @@ -466,6 +521,10 @@ static struct nvmm_type gennvm = {
>>
>> .get_lun = gennvm_get_lun,
>> .lun_info_print = gennvm_lun_info_print,
>> +
>> + .get_area = gennvm_get_area,
>> + .put_area = gennvm_put_area,
>> +
>> };
>>
>> static int __init gennvm_module_init(void)
>> diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
>> index 9c24b5b..04d7c23 100644
>> --- a/drivers/lightnvm/gennvm.h
>> +++ b/drivers/lightnvm/gennvm.h
>> @@ -39,8 +39,14 @@ struct gen_nvm {
>>
>> int nr_luns;
>> struct gen_lun *luns;
>> + struct list_head area_list;
>> };
>>
>> +struct gennvm_area {
>> + struct list_head list;
>> + sector_t begin;
>> + sector_t end; /* end is excluded */
>> +};
>> #define gennvm_for_each_lun(bm, lun, i) \
>> for ((i) = 0, lun = &(bm)->luns[0]; \
>> (i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
>> diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
>> index d8c7595..c8c27f9 100644
>> --- a/drivers/lightnvm/rrpc.c
>> +++ b/drivers/lightnvm/rrpc.c
>> @@ -1042,7 +1042,18 @@ static int rrpc_map_init(struct rrpc *rrpc)
>> {
>> struct nvm_dev *dev = rrpc->dev;
>> sector_t i;
>> - int ret;
>> + u64 slba;
>> + int ret, page_size;
>> + int page_shfit, nr_pages;
>
> Did you mean page_shift here?
>
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH v2 1/2] lightnvm: specify target's logical address area
2016-01-27 2:21 ` Wenwei Tao
@ 2016-01-27 5:52 ` Matias Bjørling
2016-01-27 6:06 ` Wenwei Tao
0 siblings, 1 reply; 15+ messages in thread
From: Matias Bjørling @ 2016-01-27 5:52 UTC (permalink / raw)
To: Wenwei Tao; +Cc: linux-kernel, linux-block
On 01/27/2016 03:21 AM, Wenwei Tao wrote:
> Yes, It's a spelling mistake, will correct it in next version.
I can fix it in the version I apply. No problem.
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH v2 1/2] lightnvm: specify target's logical address area
2016-01-27 5:52 ` Matias Bjørling
@ 2016-01-27 6:06 ` Wenwei Tao
2016-01-27 9:36 ` Matias Bjørling
0 siblings, 1 reply; 15+ messages in thread
From: Wenwei Tao @ 2016-01-27 6:06 UTC (permalink / raw)
To: Matias Bjørling; +Cc: linux-kernel, linux-block
Thanks.
2016-01-27 13:52 GMT+08:00 Matias Bjørling <mb@lightnvm.io>:
> On 01/27/2016 03:21 AM, Wenwei Tao wrote:
>>
>> Yes, It's a spelling mistake, will correct it in next version.
>
>
> I can fix it in the version I apply. No problem.
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH v2 1/2] lightnvm: specify target's logical address area
2016-01-27 6:06 ` Wenwei Tao
@ 2016-01-27 9:36 ` Matias Bjørling
2016-01-27 12:47 ` Wenwei Tao
0 siblings, 1 reply; 15+ messages in thread
From: Matias Bjørling @ 2016-01-27 9:36 UTC (permalink / raw)
To: Wenwei Tao; +Cc: linux-kernel, linux-block
On 01/27/2016 07:06 AM, Wenwei Tao wrote:
> Thanks.
>
> 2016-01-27 13:52 GMT+08:00 Matias Bjørling <mb@lightnvm.io>:
>> On 01/27/2016 03:21 AM, Wenwei Tao wrote:
>>>
>>> Yes, It's a spelling mistake, will correct it in next version.
>>
>>
>> I can fix it in the version I apply. No problem.
Hi Wenwei,
I've changed it to this. Cleaned up the variables a bit. Is that ok with you?
Thanks
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 33224cb..27a59e8 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -470,6 +470,7 @@ static int nvm_core_init(struct nvm_dev *dev)
dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
INIT_LIST_HEAD(&dev->online_targets);
mutex_init(&dev->mlock);
+ spin_lock_init(&dev->lock);
return 0;
}
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 7fb725b..6e2685d 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -20,6 +20,63 @@
#include "gennvm.h"
+static int gennvm_get_area(struct nvm_dev *dev, sector_t *begin_sect,
+ sector_t size)
+{
+ struct gen_nvm *gn = dev->mp;
+ struct gennvm_area *area, *prev;
+ int page_size = dev->sec_size * dev->sec_per_pg;
+ sector_t begin = 0;
+ sector_t max_sectors = (page_size * dev->total_pages) >> 9;
+
+ if (size > max_sectors)
+ return -EINVAL;
+
+ area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
+ if (!area)
+ return -ENOMEM;
+
+ spin_lock(&dev->lock);
+ list_for_each_entry(prev, &gn->area_list, list) {
+ if (begin + size > prev->begin) {
+ begin = prev->end;
+ continue;
+ }
+ break;
+ }
+
+ if ((begin + size) > max_sectors) {
+ spin_unlock(&dev->lock);
+ kfree(area);
+ return -EINVAL;
+ }
+
+ area->begin = *begin_sect = begin;
+ area->end = begin + size;
+ list_add(&area->list, &prev->list);
+ spin_unlock(&dev->lock);
+
+ return 0;
+}
+
+static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
+{
+ struct gen_nvm *gn = dev->mp;
+ struct gennvm_area *area;
+
+ spin_lock(&dev->lock);
+ list_for_each_entry(area, &gn->area_list, list) {
+ if (area->begin != begin)
+ continue;
+
+ list_del(&area->list);
+ spin_unlock(&dev->lock);
+ kfree(area);
+ return;
+ }
+ spin_unlock(&dev->lock);
+}
+
static void gennvm_blocks_free(struct nvm_dev *dev)
{
struct gen_nvm *gn = dev->mp;
@@ -230,6 +287,7 @@ static int gennvm_register(struct nvm_dev *dev)
gn->dev = dev;
gn->nr_luns = dev->nr_luns;
+ INIT_LIST_HEAD(&gn->area_list);
dev->mp = gn;
ret = gennvm_luns_init(dev, gn);
@@ -466,6 +524,10 @@ static struct nvmm_type gennvm = {
.get_lun = gennvm_get_lun,
.lun_info_print = gennvm_lun_info_print,
+
+ .get_area = gennvm_get_area,
+ .put_area = gennvm_put_area,
+
};
static int __init gennvm_module_init(void)
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
index 9c24b5b..04d7c23 100644
--- a/drivers/lightnvm/gennvm.h
+++ b/drivers/lightnvm/gennvm.h
@@ -39,8 +39,14 @@ struct gen_nvm {
int nr_luns;
struct gen_lun *luns;
+ struct list_head area_list;
};
+struct gennvm_area {
+ struct list_head list;
+ sector_t begin;
+ sector_t end; /* end is excluded */
+};
#define gennvm_for_each_lun(bm, lun, i) \
for ((i) = 0, lun = &(bm)->luns[0]; \
(i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index e2710da..20afe1c 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -1039,7 +1039,11 @@ static int rrpc_map_init(struct rrpc *rrpc)
{
struct nvm_dev *dev = rrpc->dev;
sector_t i;
- int ret;
+ u64 slba;
+ int ret, page_size;
+
+ page_size = dev->sec_per_pg * dev->sec_size;
+ slba = rrpc->soffset >> (ilog2(page_size) - 9);
rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_pages);
if (!rrpc->trans_map)
@@ -1062,8 +1066,8 @@ static int rrpc_map_init(struct rrpc *rrpc)
return 0;
/* Bring up the mapping table from device */
- ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
- rrpc_l2p_update, rrpc);
+ ret = dev->ops->get_l2p_tbl(dev, slba, rrpc->nr_pages, rrpc_l2p_update,
+ rrpc);
if (ret) {
pr_err("nvm: rrpc: could not read L2P table.\n");
return -EINVAL;
@@ -1072,7 +1076,6 @@ static int rrpc_map_init(struct rrpc *rrpc)
return 0;
}
-
/* Minimum pages needed within a lun */
#define PAGE_POOL_SIZE 16
#define ADDR_POOL_SIZE 64
@@ -1186,12 +1189,32 @@ err:
return -ENOMEM;
}
+/* returns 0 on success and stores the beginning address in *begin */
+static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
+{
+ struct nvm_dev *dev = rrpc->dev;
+ struct nvmm_type *mt = dev->mt;
+ sector_t size = rrpc->nr_luns * dev->sec_per_lun * dev->sec_size;
+
+ size >>= 9;
+ return mt->get_area(dev, begin, size);
+}
+
+static void rrpc_area_free(struct rrpc *rrpc)
+{
+ struct nvm_dev *dev = rrpc->dev;
+ struct nvmm_type *mt = dev->mt;
+
+ mt->put_area(dev, rrpc->soffset);
+}
+
static void rrpc_free(struct rrpc *rrpc)
{
rrpc_gc_free(rrpc);
rrpc_map_free(rrpc);
rrpc_core_free(rrpc);
rrpc_luns_free(rrpc);
+ rrpc_area_free(rrpc);
kfree(rrpc);
}
@@ -1312,6 +1335,7 @@ static void *rrpc_init(struct nvm_dev *dev, struct
gendisk *tdisk,
struct request_queue *bqueue = dev->q;
struct request_queue *tqueue = tdisk->queue;
struct rrpc *rrpc;
+ sector_t soffset;
int ret;
if (!(dev->identity.dom & NVM_RSP_L2P)) {
@@ -1337,6 +1361,13 @@ static void *rrpc_init(struct nvm_dev *dev,
struct gendisk *tdisk,
/* simple round-robin strategy */
atomic_set(&rrpc->next_lun, -1);
+ ret = rrpc_area_init(rrpc, &soffset);
+ if (ret < 0) {
+ pr_err("nvm: rrpc: could not initialize area\n");
+ return ERR_PTR(ret);
+ }
+ rrpc->soffset = soffset;
+
ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
if (ret) {
pr_err("nvm: rrpc: could not initialize luns\n");
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index ef13ac7..9380c68 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -97,6 +97,7 @@ struct rrpc {
struct nvm_dev *dev;
struct gendisk *disk;
+ sector_t soffset; /* logical sector offset */
u64 poffset; /* physical page offset */
int lun_offset;
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index d675011..18f1bb0 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -351,6 +351,7 @@ struct nvm_dev {
char name[DISK_NAME_LEN];
struct mutex mlock;
+ spinlock_t lock;
};
static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
@@ -463,6 +464,9 @@ typedef int (nvmm_erase_blk_fn)(struct nvm_dev *,
struct nvm_block *,
typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
+typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
+typedef void (nvmm_put_area_fn)(struct nvm_dev *, sector_t);
+
struct nvmm_type {
const char *name;
unsigned int version[3];
@@ -487,6 +491,10 @@ struct nvmm_type {
/* Statistics */
nvmm_lun_info_print_fn *lun_info_print;
+
+ nvmm_get_area_fn *get_area;
+ nvmm_put_area_fn *put_area;
+
struct list_head list;
};
^ permalink raw reply related [flat|nested] 15+ messages in thread
* Re: [PATCH v2 2/2] lightnvm: add non-continuous lun target creation support
2016-01-26 12:33 ` [PATCH v2 2/2] lightnvm: add non-continuous lun target creation support Wenwei Tao
@ 2016-01-27 9:44 ` Matias Bjørling
2016-01-28 8:50 ` Wenwei Tao
0 siblings, 1 reply; 15+ messages in thread
From: Matias Bjørling @ 2016-01-27 9:44 UTC (permalink / raw)
To: Wenwei Tao; +Cc: linux-kernel, linux-block
On 01/26/2016 01:33 PM, Wenwei Tao wrote:
> When create a target, we specify the begin lunid and
> the end lunid, and get the corresponding continuous
> luns from media manager, if one of the luns is not free,
> we failed to create the target, even if the device's
> total free luns are enough.
>
> So add non-continuous lun target creation support,
> thus we can improve the backend device's space utilization.
> Signed-off-by: Wenwei Tao <ww.tao0320@gmail.com>
> ---
> Changes since v1:
> -use NVM_FIXED instead NVM_C_FIXED in gennvm_get_lun
> -add target creation flags check
> -rebase to v4.5-rc1
>
> drivers/lightnvm/core.c | 36 ++++---
> drivers/lightnvm/gennvm.c | 42 ++++++++-
> drivers/lightnvm/rrpc.c | 215 +++++++++++++++++++++++++++---------------
> drivers/lightnvm/rrpc.h | 6 +-
> include/linux/lightnvm.h | 24 ++++-
> include/uapi/linux/lightnvm.h | 3 +
> 6 files changed, 229 insertions(+), 97 deletions(-)
>
Hi Wenwei,
I did some digging on the patch and changed the interface to a
reserve/release interface. I also removed the logic to dynamically
select another lun than the one requested.
A couple of questions:
1. The rrpc_lun->rev_lock and rev_trans_map change; this might be for
another patch, and it isn't directly related to continuous mapping?
2. Instead of dynamically assigning new luns when not available, what
about taking a list of lun ids instead?
I would only implement this in the lnvm ioctl interface. It would allow
a list of lun ids to be passed through the lnvm ioctl interface. This
way, the NVM_CONFIG_TYPE_SIMPLE can be extended with another
NVM_CONFIG_TYPE_LIST, or similar, which then parses the ioctl
appropriately. Would that be a better way to do it?
Here is the diff. It is also rebased on top of the two latest patches
that were sent up for the next -rc.
Thanks
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 27a59e8..59a4bf9 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -468,6 +468,11 @@ static int nvm_core_init(struct nvm_dev *dev)
dev->luns_per_chnl *
dev->nr_chnls;
dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
+ dev->lun_map = kcalloc(BITS_TO_LONGS(dev->nr_luns),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!dev->lun_map)
+ return -ENOMEM;
+
INIT_LIST_HEAD(&dev->online_targets);
mutex_init(&dev->mlock);
spin_lock_init(&dev->lock);
@@ -610,6 +615,7 @@ void nvm_unregister(char *disk_name)
up_write(&nvm_lock);
nvm_exit(dev);
+ kfree(dev->lun_map);
kfree(dev);
}
EXPORT_SYMBOL(nvm_unregister);
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 6e2685d..6419898 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -188,6 +188,9 @@ static int gennvm_block_map(u64 slba, u32 nlb,
__le64 *entries, void *private)
lun_id = div_u64(pba, dev->sec_per_lun);
lun = &gn->luns[lun_id];
+ if (!test_bit(lun_id, dev->lun_map))
+ __set_bit(lun_id, dev->lun_map);
+
/* Calculate block offset into lun */
pba = pba - (dev->sec_per_lun * lun_id);
blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];
@@ -478,10 +481,23 @@ static int gennvm_erase_blk(struct nvm_dev *dev,
struct nvm_block *blk,
return nvm_erase_ppa(dev, &addr, 1);
}
+static int gennvm_reserve_lun(struct nvm_dev *dev, int lunid)
+{
+ return test_and_set_bit(lunid, dev->lun_map);
+}
+
+static void gennvm_release_lun(struct nvm_dev *dev, int lunid)
+{
+ WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
+}
+
static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
{
struct gen_nvm *gn = dev->mp;
+ if (unlikely(lunid >= dev->nr_luns))
+ return NULL;
+
return &gn->luns[lunid].vlun;
}
@@ -523,6 +539,8 @@ static struct nvmm_type gennvm = {
.erase_blk = gennvm_erase_blk,
.get_lun = gennvm_get_lun,
+ .reserve_lun = gennvm_reserve_lun,
+ .release_lun = gennvm_release_lun,
.lun_info_print = gennvm_lun_info_print,
.get_area = gennvm_get_area,
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 20afe1c..0a99ebc 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -26,25 +26,32 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct
bio *bio,
for ((i) = 0, rlun = &(rrpc)->luns[0]; \
(i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
+static inline u64 lun_poffset(struct nvm_dev *dev, struct nvm_lun *lun)
+{
+ return lun->id * dev->sec_per_lun;
+}
+
static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
{
struct rrpc_block *rblk = a->rblk;
- unsigned int pg_offset;
+ struct rrpc_lun *rlun = rblk->rlun;
+ u64 pg_offset;
- lockdep_assert_held(&rrpc->rev_lock);
+ lockdep_assert_held(&rlun->rev_lock);
if (a->addr == ADDR_EMPTY || !rblk)
return;
spin_lock(&rblk->lock);
- div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, &pg_offset);
+ div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, (u32 *)&pg_offset);
WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
rblk->nr_invalid_pages++;
spin_unlock(&rblk->lock);
- rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
+ pg_offset = lun_poffset(rrpc->dev, rlun->parent);
+ rlun->rev_trans_map[a->addr - pg_offset].addr = ADDR_EMPTY;
}
static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
@@ -52,14 +59,15 @@ static void rrpc_invalidate_range(struct rrpc *rrpc,
sector_t slba,
{
sector_t i;
- spin_lock(&rrpc->rev_lock);
for (i = slba; i < slba + len; i++) {
struct rrpc_addr *gp = &rrpc->trans_map[i];
+ struct rrpc_lun *rlun = gp->rblk->rlun;
+ spin_lock(&rlun->rev_lock);
rrpc_page_invalidate(rrpc, gp);
+ spin_unlock(&rlun->rev_lock);
gp->rblk = NULL;
}
- spin_unlock(&rrpc->rev_lock);
}
static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
@@ -281,13 +289,14 @@ static void rrpc_end_sync_bio(struct bio *bio)
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block
*rblk)
{
struct request_queue *q = rrpc->dev->q;
+ struct rrpc_lun *rlun = rblk->rlun;
struct rrpc_rev_addr *rev;
struct nvm_rq *rqd;
struct bio *bio;
struct page *page;
int slot;
int nr_pgs_per_blk = rrpc->dev->pgs_per_blk;
- u64 phys_addr;
+ u64 phys_addr, poffset;
DECLARE_COMPLETION_ONSTACK(wait);
if (bitmap_full(rblk->invalid_pages, nr_pgs_per_blk))
@@ -303,6 +312,7 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc,
struct rrpc_block *rblk)
if (!page)
return -ENOMEM;
+ poffset = lun_poffset(rrpc->dev, rlun->parent);
while ((slot = find_first_zero_bit(rblk->invalid_pages,
nr_pgs_per_blk)) < nr_pgs_per_blk) {
@@ -310,23 +320,23 @@ static int rrpc_move_valid_pages(struct rrpc
*rrpc, struct rrpc_block *rblk)
phys_addr = (rblk->parent->id * nr_pgs_per_blk) + slot;
try:
- spin_lock(&rrpc->rev_lock);
+ spin_lock(&rlun->rev_lock);
/* Get logical address from physical to logical table */
- rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
+ rev = &rlun->rev_trans_map[phys_addr - poffset];
/* already updated by previous regular write */
if (rev->addr == ADDR_EMPTY) {
- spin_unlock(&rrpc->rev_lock);
+ spin_unlock(&rlun->rev_lock);
continue;
}
rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
if (IS_ERR_OR_NULL(rqd)) {
- spin_unlock(&rrpc->rev_lock);
+ spin_unlock(&rlun->rev_lock);
schedule();
goto try;
}
- spin_unlock(&rrpc->rev_lock);
+ spin_unlock(&rlun->rev_lock);
/* Perform read to do GC */
bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
@@ -395,7 +405,7 @@ static void rrpc_block_gc(struct work_struct *work)
struct rrpc_block *rblk = gcb->rblk;
struct nvm_dev *dev = rrpc->dev;
struct nvm_lun *lun = rblk->parent->lun;
- struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
+ struct rrpc_lun *rlun = lun->private;
mempool_free(gcb, rrpc->gcb_pool);
pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
@@ -496,9 +506,9 @@ static void rrpc_gc_queue(struct work_struct *work)
ws_gc);
struct rrpc *rrpc = gcb->rrpc;
struct rrpc_block *rblk = gcb->rblk;
- struct nvm_lun *lun = rblk->parent->lun;
struct nvm_block *blk = rblk->parent;
- struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
+ struct nvm_lun *lun = blk->lun;
+ struct rrpc_lun *rlun = lun->private;
spin_lock(&rlun->lock);
list_add_tail(&rblk->prio, &rlun->prio_list);
@@ -549,22 +559,24 @@ static struct rrpc_lun *rrpc_get_lun_rr(struct
rrpc *rrpc, int is_gc)
static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
struct rrpc_block *rblk, u64 paddr)
{
+ struct rrpc_lun *rlun = rblk->rlun;
struct rrpc_addr *gp;
struct rrpc_rev_addr *rev;
+ u64 poffset = lun_poffset(rrpc->dev, rlun->parent);
BUG_ON(laddr >= rrpc->nr_pages);
gp = &rrpc->trans_map[laddr];
- spin_lock(&rrpc->rev_lock);
+ spin_lock(&rlun->rev_lock);
if (gp->rblk)
rrpc_page_invalidate(rrpc, gp);
gp->addr = paddr;
gp->rblk = rblk;
- rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
+ rev = &rlun->rev_trans_map[gp->addr - poffset];
rev->addr = laddr;
- spin_unlock(&rrpc->rev_lock);
+ spin_unlock(&rlun->rev_lock);
return gp;
}
@@ -953,8 +965,6 @@ static void rrpc_requeue(struct work_struct *work)
static void rrpc_gc_free(struct rrpc *rrpc)
{
- struct rrpc_lun *rlun;
- int i;
if (rrpc->krqd_wq)
destroy_workqueue(rrpc->krqd_wq);
@@ -962,16 +972,6 @@ static void rrpc_gc_free(struct rrpc *rrpc)
if (rrpc->kgc_wq)
destroy_workqueue(rrpc->kgc_wq);
- if (!rrpc->luns)
- return;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
-
- if (!rlun->blocks)
- break;
- vfree(rlun->blocks);
- }
}
static int rrpc_gc_init(struct rrpc *rrpc)
@@ -992,7 +992,6 @@ static int rrpc_gc_init(struct rrpc *rrpc)
static void rrpc_map_free(struct rrpc *rrpc)
{
- vfree(rrpc->rev_trans_map);
vfree(rrpc->trans_map);
}
@@ -1000,19 +999,28 @@ static int rrpc_l2p_update(u64 slba, u32 nlb,
__le64 *entries, void *private)
{
struct rrpc *rrpc = (struct rrpc *)private;
struct nvm_dev *dev = rrpc->dev;
- struct rrpc_addr *addr = rrpc->trans_map + slba;
- struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
+ struct rrpc_addr *addr;
+ struct rrpc_rev_addr *raddr;
sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
- u64 elba = slba + nlb;
- u64 i;
+ int page_size = dev->sec_per_pg * dev->sec_size;
+ u64 elba, i;
+
+ elba = slba + nlb;
if (unlikely(elba > dev->total_pages)) {
pr_err("nvm: L2P data from device is out of bounds!\n");
return -EINVAL;
}
+ slba -= rrpc->soffset >> (ilog2(page_size) - 9);
+ addr = rrpc->trans_map + slba;
for (i = 0; i < nlb; i++) {
+ struct rrpc_lun *rlun;
+ struct nvm_lun *lun;
u64 pba = le64_to_cpu(entries[i]);
+ u64 poffset;
+ int lunid;
+
/* LNVM treats address-spaces as silos, LBA and PBA are
* equally large and zero-indexed.
*/
@@ -1028,8 +1036,15 @@ static int rrpc_l2p_update(u64 slba, u32 nlb,
__le64 *entries, void *private)
if (!pba)
continue;
+ lunid = div_u64(pba, dev->sec_per_lun);
+ lun = dev->mt->get_lun(dev, lunid);
+ if (unlikely(!lun))
+ return -EINVAL;
+ rlun = lun->private;
+ raddr = rlun->rev_trans_map;
+ poffset = lun_poffset(dev, lun);
addr[i].addr = pba;
- raddr[pba].addr = slba + i;
+ raddr[pba - poffset].addr = slba + i;
}
return 0;
@@ -1049,17 +1064,11 @@ static int rrpc_map_init(struct rrpc *rrpc)
if (!rrpc->trans_map)
return -ENOMEM;
- rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
- * rrpc->nr_pages);
- if (!rrpc->rev_trans_map)
- return -ENOMEM;
for (i = 0; i < rrpc->nr_pages; i++) {
struct rrpc_addr *p = &rrpc->trans_map[i];
- struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];
p->addr = ADDR_EMPTY;
- r->addr = ADDR_EMPTY;
}
if (!dev->ops->get_l2p_tbl)
@@ -1130,22 +1139,86 @@ static void rrpc_core_free(struct rrpc *rrpc)
static void rrpc_luns_free(struct rrpc *rrpc)
{
+ struct nvm_dev *dev = rrpc->dev;
+ struct rrpc_lun *rlun;
+ struct nvm_lun *lun;
+ int i;
+
+ if (!rrpc->luns)
+ return;
+
+ for (i = 0; i < rrpc->nr_luns; i++) {
+ rlun = &rrpc->luns[i];
+ if (!rlun)
+ break;
+ lun = rlun->parent;
+ dev->mt->release_lun(dev, lun->id);
+ vfree(rlun->rev_trans_map);
+ vfree(rlun->blocks);
+ }
kfree(rrpc->luns);
+ rrpc->luns = NULL;
+
+}
+
+static int rrpc_lun_init(struct rrpc *rrpc, struct rrpc_lun *rlun,
+ struct nvm_lun *lun)
+{
+ struct nvm_dev *dev = rrpc->dev;
+ int i;
+
+ rlun->rrpc = rrpc;
+ rlun->parent = lun;
+
+ rlun->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr) *
+ dev->sec_per_lun);
+ if (!rlun->rev_trans_map)
+ return -ENOMEM;
+
+ for (i = 0; i < dev->sec_per_lun; i++) {
+ struct rrpc_rev_addr *r = &rlun->rev_trans_map[i];
+
+ r->addr = ADDR_EMPTY;
+ }
+
+ rlun->blocks = vzalloc(sizeof(struct rrpc_block) * dev->blks_per_lun);
+ if (!rlun->blocks) {
+ vfree(rlun->rev_trans_map);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < dev->blks_per_lun; i++) {
+ struct rrpc_block *rblk = &rlun->blocks[i];
+ struct nvm_block *blk = &lun->blocks[i];
+
+ rblk->parent = blk;
+ rblk->rlun = rlun;
+ INIT_LIST_HEAD(&rblk->prio);
+ spin_lock_init(&rblk->lock);
+ }
+
+ lun->private = rlun;
+ INIT_LIST_HEAD(&rlun->prio_list);
+ INIT_LIST_HEAD(&rlun->open_list);
+ INIT_LIST_HEAD(&rlun->closed_list);
+ INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
+ spin_lock_init(&rlun->lock);
+ spin_lock_init(&rlun->rev_lock);
+
+ return 0;
}
static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
{
struct nvm_dev *dev = rrpc->dev;
struct rrpc_lun *rlun;
- int i, j;
+ int i, ret;
if (dev->pgs_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
pr_err("rrpc: number of pages per block too high.");
return -EINVAL;
}
- spin_lock_init(&rrpc->rev_lock);
-
rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
GFP_KERNEL);
if (!rrpc->luns)
@@ -1153,40 +1226,35 @@ static int rrpc_luns_init(struct rrpc *rrpc, int
lun_begin, int lun_end)
/* 1:1 mapping */
for (i = 0; i < rrpc->nr_luns; i++) {
- struct nvm_lun *lun = dev->mt->get_lun(dev, lun_begin + i);
+ int lunid = lun_begin + i;
+ struct nvm_lun *lun;
+
+ if (dev->mt->reserve_lun(dev, lunid)) {
+ pr_err("rrpc: lun %u is already allocated\n", lunid);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ lun = dev->mt->get_lun(dev, lunid);
+ if (!lun) {
+ ret = -EINVAL;
+ goto err;
+ }
rlun = &rrpc->luns[i];
- rlun->rrpc = rrpc;
- rlun->parent = lun;
- INIT_LIST_HEAD(&rlun->prio_list);
- INIT_LIST_HEAD(&rlun->open_list);
- INIT_LIST_HEAD(&rlun->closed_list);
-
- INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
- spin_lock_init(&rlun->lock);
+ ret = rrpc_lun_init(rrpc, rlun, lun);
+ if (ret)
+ goto err;
rrpc->total_blocks += dev->blks_per_lun;
rrpc->nr_pages += dev->sec_per_lun;
-
- rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
- rrpc->dev->blks_per_lun);
- if (!rlun->blocks)
- goto err;
-
- for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
- struct rrpc_block *rblk = &rlun->blocks[j];
- struct nvm_block *blk = &lun->blocks[j];
-
- rblk->parent = blk;
- rblk->rlun = rlun;
- INIT_LIST_HEAD(&rblk->prio);
- spin_lock_init(&rblk->lock);
- }
}
return 0;
err:
- return -ENOMEM;
+ rrpc_luns_free(rrpc);
+ return ret;
+
}
/* returns 0 on success and stores the beginning address in *begin */
@@ -1258,14 +1326,16 @@ static sector_t rrpc_capacity(void *private)
static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block
*rblk)
{
struct nvm_dev *dev = rrpc->dev;
+ struct rrpc_lun *rlun = rblk->rlun;
int offset;
struct rrpc_addr *laddr;
- u64 paddr, pladdr;
+ u64 paddr, pladdr, poffset;
+ poffset = lun_poffset(dev, rlun->parent);
for (offset = 0; offset < dev->pgs_per_blk; offset++) {
paddr = block_to_addr(rrpc, rblk) + offset;
- pladdr = rrpc->rev_trans_map[paddr].addr;
+ pladdr = rlun->rev_trans_map[paddr - poffset].addr;
if (pladdr == ADDR_EMPTY)
continue;
@@ -1374,9 +1444,6 @@ static void *rrpc_init(struct nvm_dev *dev, struct
gendisk *tdisk,
goto err;
}
- rrpc->poffset = dev->sec_per_lun * lun_begin;
- rrpc->lun_offset = lun_begin;
-
ret = rrpc_core_init(rrpc);
if (ret) {
pr_err("nvm: rrpc: could not initialize core\n");
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index 9380c68..4d756d8 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -86,6 +86,9 @@ struct rrpc_lun {
*/
struct work_struct ws_gc;
+ /* store a reverse map for garbage collection */
+ struct rrpc_rev_addr *rev_trans_map;
+ spinlock_t rev_lock;
spinlock_t lock;
};
@@ -124,9 +127,6 @@ struct rrpc {
* addresses are used when writing to the disk block device.
*/
struct rrpc_addr *trans_map;
- /* also store a reverse map for garbage collection */
- struct rrpc_rev_addr *rev_trans_map;
- spinlock_t rev_lock;
struct rrpc_inflight inflights;
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 18f1bb0..a33af4f 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -271,6 +271,7 @@ struct nvm_lun {
spinlock_t lock;
struct nvm_block *blocks;
+ void *private;
};
enum {
@@ -342,6 +343,8 @@ struct nvm_dev {
int nr_luns;
unsigned max_pages_per_blk;
+ unsigned long *lun_map;
+
void *ppalist_pool;
struct nvm_id identity;
@@ -462,6 +465,8 @@ typedef int (nvmm_submit_io_fn)(struct nvm_dev *,
struct nvm_rq *);
typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
unsigned long);
typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
+typedef int (nvmm_reserve_lun(struct nvm_dev *, int));
+typedef void (nvmm_release_lun(struct nvm_dev *, int));
typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
@@ -488,6 +493,8 @@ struct nvmm_type {
/* Configuration management */
nvmm_get_lun_fn *get_lun;
+ nvmm_reserve_lun *reserve_lun;
+ nvmm_release_lun *release_lun;
/* Statistics */
nvmm_lun_info_print_fn *lun_info_print;
^ permalink raw reply related [flat|nested] 15+ messages in thread
* Re: [PATCH v2 1/2] lightnvm: specify target's logical address area
2016-01-27 9:36 ` Matias Bjørling
@ 2016-01-27 12:47 ` Wenwei Tao
2016-01-27 13:26 ` Matias Bjørling
0 siblings, 1 reply; 15+ messages in thread
From: Wenwei Tao @ 2016-01-27 12:47 UTC (permalink / raw)
To: Matias Bjørling; +Cc: linux-kernel, linux-block
2016-01-27 17:36 GMT+08:00 Matias Bjørling <mb@lightnvm.io>:
> On 01/27/2016 07:06 AM, Wenwei Tao wrote:
>> Thanks.
>>
>> 2016-01-27 13:52 GMT+08:00 Matias Bjørling <mb@lightnvm.io>:
>>> On 01/27/2016 03:21 AM, Wenwei Tao wrote:
>>>>
>>>> Yes, It's a spelling mistake, will correct it in next version.
>>>
>>>
>>> I can fix it in the version I apply. No problem.
>
> Hi Wenwei,
>
> I've changed it to this. Clean up the variables a bit. Is that ok with you?
>
> Thanks
>
> diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
> index 33224cb..27a59e8 100644
> --- a/drivers/lightnvm/core.c
> +++ b/drivers/lightnvm/core.c
> @@ -470,6 +470,7 @@ static int nvm_core_init(struct nvm_dev *dev)
> dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
> INIT_LIST_HEAD(&dev->online_targets);
> mutex_init(&dev->mlock);
> + spin_lock_init(&dev->lock);
>
> return 0;
> }
> diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
> index 7fb725b..6e2685d 100644
> --- a/drivers/lightnvm/gennvm.c
> +++ b/drivers/lightnvm/gennvm.c
> @@ -20,6 +20,63 @@
>
> #include "gennvm.h"
>
> +static int gennvm_get_area(struct nvm_dev *dev, sector_t *begin_sect,
> + sector_t size)
> +{
> + struct gen_nvm *gn = dev->mp;
> + struct gennvm_area *area, *prev;
> + int page_size = dev->sec_size * dev->sec_per_pg;
> + sector_t begin = 0;
> + sector_t max_sectors = (page_size * dev->total_pages) >> 9;
> +
> + if (size > max_sectors)
> + return -EINVAL;
> +
> + area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
> + if (!area)
> + return -ENOMEM;
> +
> + spin_lock(&dev->lock);
> + list_for_each_entry(prev, &gn->area_list, list) {
> + if (begin + size > prev->begin) {
> + begin = prev->end;
> + continue;
> + }
> + break;
> + }
> +
> + if ((begin + size) > max_sectors) {
> + spin_unlock(&dev->lock);
> + kfree(area);
> + return -EINVAL;
> + }
> +
> + area->begin = *begin_sect = begin;
> + area->end = begin + size;
> + list_add(&area->list, &prev->list);
I think I have made a mistake here. Inserting the new area after prev
will not keep the list sorted in increasing order of area->begin. And
prev is not trustworthy after the loop exits: it may point to
list_entry((head)->next, typeof(*pos), member). Below is the changed
code:
+static int gennvm_get_area(struct nvm_dev *dev, sector_t *begin_sect,
+ sector_t size)
+{
+ struct gen_nvm *gn = dev->mp;
+ struct gennvm_area *area, *prev, *next;
+ sector_t begin = 0;
+ int page_size = dev->sec_size * dev->sec_per_pg;
+ sector_t max_sectors = (page_size * dev->total_pages) >> 9;
+
+ if (size > max_sectors)
+ return -EINVAL;
+ area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
+ if (!area)
+ return -ENOMEM;
+
+ prev = NULL;
+
+ spin_lock(&dev->lock);
+ list_for_each_entry(next, &gn->area_list, list) {
+ if (begin + size > next->begin) {
+ begin = next->end;
+ prev = next;
+ continue;
+ }
+ break;
+ }
+
+ if ((begin + size) > max_sectors) {
+ spin_unlock(&dev->lock);
+ kfree(area);
+ return -EINVAL;
+ }
+
+ area->begin = *begin_sect = begin;
+ area->end = begin + size;
+ if (prev)
+ list_add(&area->list, &prev->list);
+ else
+ list_add(&area->list, &gn->area_list);
+ spin_unlock(&dev->lock);
+ return 0;
+}
> + spin_unlock(&dev->lock);
> +
> + return 0;
> +}
> +
> +static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
> +{
> + struct gen_nvm *gn = dev->mp;
> + struct gennvm_area *area;
> +
> + spin_lock(&dev->lock);
> + list_for_each_entry(area, &gn->area_list, list) {
> + if (area->begin != begin)
> + continue;
> +
> + list_del(&area->list);
> + spin_unlock(&dev->lock);
> + kfree(area);
> + return;
> + }
> + spin_unlock(&dev->lock);
> +}
> +
> static void gennvm_blocks_free(struct nvm_dev *dev)
> {
> struct gen_nvm *gn = dev->mp;
> @@ -230,6 +287,7 @@ static int gennvm_register(struct nvm_dev *dev)
>
> gn->dev = dev;
> gn->nr_luns = dev->nr_luns;
> + INIT_LIST_HEAD(&gn->area_list);
> dev->mp = gn;
>
> ret = gennvm_luns_init(dev, gn);
> @@ -466,6 +524,10 @@ static struct nvmm_type gennvm = {
>
> .get_lun = gennvm_get_lun,
> .lun_info_print = gennvm_lun_info_print,
> +
> + .get_area = gennvm_get_area,
> + .put_area = gennvm_put_area,
> +
> };
>
> static int __init gennvm_module_init(void)
> diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
> index 9c24b5b..04d7c23 100644
> --- a/drivers/lightnvm/gennvm.h
> +++ b/drivers/lightnvm/gennvm.h
> @@ -39,8 +39,14 @@ struct gen_nvm {
>
> int nr_luns;
> struct gen_lun *luns;
> + struct list_head area_list;
> };
>
> +struct gennvm_area {
> + struct list_head list;
> + sector_t begin;
> + sector_t end; /* end is excluded */
> +};
> #define gennvm_for_each_lun(bm, lun, i) \
> for ((i) = 0, lun = &(bm)->luns[0]; \
> (i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
> diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
> index e2710da..20afe1c 100644
> --- a/drivers/lightnvm/rrpc.c
> +++ b/drivers/lightnvm/rrpc.c
> @@ -1039,7 +1039,11 @@ static int rrpc_map_init(struct rrpc *rrpc)
> {
> struct nvm_dev *dev = rrpc->dev;
> sector_t i;
> - int ret;
> + u64 slba;
> + int ret, page_size;
> +
> + page_size = dev->sec_per_pg * dev->sec_size;
> + slba = rrpc->soffset >> (ilog2(page_size) - 9);
>
> rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_pages);
> if (!rrpc->trans_map)
> @@ -1062,8 +1066,8 @@ static int rrpc_map_init(struct rrpc *rrpc)
> return 0;
>
> /* Bring up the mapping table from device */
> - ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
> - rrpc_l2p_update, rrpc);
> + ret = dev->ops->get_l2p_tbl(dev, slba, rrpc->nr_pages, rrpc_l2p_update,
> + rrpc);
In rrpc_luns_init, rrpc->nr_pages seems to be the target's sector
count, whereas previously dev->total_pages was used; dev->total_pages
is the nvm device's page count, so I am a little confused here.
> if (ret) {
> pr_err("nvm: rrpc: could not read L2P table.\n");
> return -EINVAL;
> @@ -1072,7 +1076,6 @@ static int rrpc_map_init(struct rrpc *rrpc)
> return 0;
> }
>
> -
> /* Minimum pages needed within a lun */
> #define PAGE_POOL_SIZE 16
> #define ADDR_POOL_SIZE 64
> @@ -1186,12 +1189,32 @@ err:
> return -ENOMEM;
> }
>
> +/* returns 0 on success and stores the beginning address in *begin */
> +static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
> +{
> + struct nvm_dev *dev = rrpc->dev;
> + struct nvmm_type *mt = dev->mt;
> + sector_t size = rrpc->nr_luns * dev->sec_per_lun * dev->sec_size;
> +
> + size >>= 9;
> + return mt->get_area(dev, begin, size);
> +}
> +
> +static void rrpc_area_free(struct rrpc *rrpc)
> +{
> + struct nvm_dev *dev = rrpc->dev;
> + struct nvmm_type *mt = dev->mt;
> +
> + mt->put_area(dev, rrpc->soffset);
> +}
> +
> static void rrpc_free(struct rrpc *rrpc)
> {
> rrpc_gc_free(rrpc);
> rrpc_map_free(rrpc);
> rrpc_core_free(rrpc);
> rrpc_luns_free(rrpc);
> + rrpc_area_free(rrpc);
>
> kfree(rrpc);
> }
> @@ -1312,6 +1335,7 @@ static void *rrpc_init(struct nvm_dev *dev, struct
> gendisk *tdisk,
> struct request_queue *bqueue = dev->q;
> struct request_queue *tqueue = tdisk->queue;
> struct rrpc *rrpc;
> + sector_t soffset;
> int ret;
>
> if (!(dev->identity.dom & NVM_RSP_L2P)) {
> @@ -1337,6 +1361,13 @@ static void *rrpc_init(struct nvm_dev *dev,
> struct gendisk *tdisk,
> /* simple round-robin strategy */
> atomic_set(&rrpc->next_lun, -1);
>
> + ret = rrpc_area_init(rrpc, &soffset);
> + if (ret < 0) {
> + pr_err("nvm: rrpc: could not initialize area\n");
> + return ERR_PTR(ret);
> + }
> + rrpc->soffset = soffset;
> +
> ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
> if (ret) {
> pr_err("nvm: rrpc: could not initialize luns\n");
> diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
> index ef13ac7..9380c68 100644
> --- a/drivers/lightnvm/rrpc.h
> +++ b/drivers/lightnvm/rrpc.h
> @@ -97,6 +97,7 @@ struct rrpc {
> struct nvm_dev *dev;
> struct gendisk *disk;
>
> + sector_t soffset; /* logical sector offset */
> u64 poffset; /* physical page offset */
> int lun_offset;
>
> diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
> index d675011..18f1bb0 100644
> --- a/include/linux/lightnvm.h
> +++ b/include/linux/lightnvm.h
> @@ -351,6 +351,7 @@ struct nvm_dev {
> char name[DISK_NAME_LEN];
>
> struct mutex mlock;
> + spinlock_t lock;
> };
>
> static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
> @@ -463,6 +464,9 @@ typedef int (nvmm_erase_blk_fn)(struct nvm_dev *,
> struct nvm_block *,
> typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
> typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
>
> +typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
> +typedef void (nvmm_put_area_fn)(struct nvm_dev *, sector_t);
> +
> struct nvmm_type {
> const char *name;
> unsigned int version[3];
> @@ -487,6 +491,10 @@ struct nvmm_type {
>
> /* Statistics */
> nvmm_lun_info_print_fn *lun_info_print;
> +
> + nvmm_get_area_fn *get_area;
> + nvmm_put_area_fn *put_area;
> +
> struct list_head list;
> };
>
>
>
>
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH v2 1/2] lightnvm: specify target's logical address area
2016-01-27 12:47 ` Wenwei Tao
@ 2016-01-27 13:26 ` Matias Bjørling
2016-01-27 14:58 ` Wenwei Tao
0 siblings, 1 reply; 15+ messages in thread
From: Matias Bjørling @ 2016-01-27 13:26 UTC (permalink / raw)
To: Wenwei Tao; +Cc: linux-kernel, linux-block
On 01/27/2016 01:47 PM, Wenwei Tao wrote:
> 2016-01-27 17:36 GMT+08:00 Matias Bjørling <mb@lightnvm.io>:
>> On 01/27/2016 07:06 AM, Wenwei Tao wrote:
>>> Thanks.
>>>
>>> 2016-01-27 13:52 GMT+08:00 Matias Bjørling <mb@lightnvm.io>:
>>>> On 01/27/2016 03:21 AM, Wenwei Tao wrote:
>>>>>
>>>>> Yes, It's a spelling mistake, will correct it in next version.
>>>>
>>>>
>>>> I can fix it in the version I apply. No problem.
>>
>> Hi Wenwei,
>>
>> I've changed it to this. Clean up the variables a bit. Is that ok with you?
>>
>> Thanks
>>
>> diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
>> index 33224cb..27a59e8 100644
>> --- a/drivers/lightnvm/core.c
>> +++ b/drivers/lightnvm/core.c
>> @@ -470,6 +470,7 @@ static int nvm_core_init(struct nvm_dev *dev)
>> dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
>> INIT_LIST_HEAD(&dev->online_targets);
>> mutex_init(&dev->mlock);
>> + spin_lock_init(&dev->lock);
>>
>> return 0;
>> }
>> diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
>> index 7fb725b..6e2685d 100644
>> --- a/drivers/lightnvm/gennvm.c
>> +++ b/drivers/lightnvm/gennvm.c
>> @@ -20,6 +20,63 @@
>>
>> #include "gennvm.h"
>>
>> +static int gennvm_get_area(struct nvm_dev *dev, sector_t *begin_sect,
>> + sector_t size)
>> +{
>> + struct gen_nvm *gn = dev->mp;
>> + struct gennvm_area *area, *prev;
>> + int page_size = dev->sec_size * dev->sec_per_pg;
>> + sector_t begin = 0;
>> + sector_t max_sectors = (page_size * dev->total_pages) >> 9;
>> +
>> + if (size > max_sectors)
>> + return -EINVAL;
>> +
>> + area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
>> + if (!area)
>> + return -ENOMEM;
>> +
>> + spin_lock(&dev->lock);
>> + list_for_each_entry(prev, &gn->area_list, list) {
>> + if (begin + size > prev->begin) {
>> + begin = prev->end;
>> + continue;
>> + }
>> + break;
>> + }
>> +
>> + if ((begin + size) > max_sectors) {
>> + spin_unlock(&dev->lock);
>> + kfree(area);
>> + return -EINVAL;
>> + }
>> +
>> + area->begin = *begin_sect = begin;
>> + area->end = begin + size;
>> + list_add(&area->list, &prev->list);
>
> I think I have made a mistake here. Insert the new area after prev
> will not make the list increase by area->begin. And prev is not
> trustable
> when out of the loop, it may point to list_entry((head)->next,
> typeof(*pos), member). Below is changed code:
>
> +static int gennvm_get_area(struct nvm_dev *dev, sector_t *begin_sect,
> + sector_t size)
> +{
> + struct gen_nvm *gn = dev->mp;
> + struct gennvm_area *area, *prev, *next;
> + sector_t begin = 0;
> + int page_size = dev->sec_size * dev->sec_per_pg;
> + sector_t max_sectors = (page_size * dev->total_pages) >> 9;
> +
> + if (size > max_sectors)
> + return -EINVAL;
> + area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
> + if (!area)
> + return -ENOMEM;
> +
> + prev = NULL;
> +
> + spin_lock(&dev->lock);
> + list_for_each_entry(next, &gn->area_list, list) {
> + if (begin + size > next->begin) {
> + begin = next->end;
> + prev = next;
> + continue;
> + }
> + break;
> + }
> +
> + if ((begin + size) > max_sectors) {
> + spin_unlock(&dev->lock);
> + kfree(area);
> + return -EINVAL;
> + }
> +
> + area->begin = *begin_sect = begin;
> + area->end = begin + size;
> + if (prev)
> + list_add(&area->list, &prev->list);
> + else
> + list_add(&area->list, &gn->area_list);
> + spin_unlock(&dev->lock);
> + return 0;
> +}
>
>
>> + spin_unlock(&dev->lock);
>> +
>> + return 0;
>> +}
>> +
>> +static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
>> +{
>> + struct gen_nvm *gn = dev->mp;
>> + struct gennvm_area *area;
>> +
>> + spin_lock(&dev->lock);
>> + list_for_each_entry(area, &gn->area_list, list) {
>> + if (area->begin != begin)
>> + continue;
>> +
>> + list_del(&area->list);
>> + spin_unlock(&dev->lock);
>> + kfree(area);
>> + return;
>> + }
>> + spin_unlock(&dev->lock);
>> +}
>> +
>> static void gennvm_blocks_free(struct nvm_dev *dev)
>> {
>> struct gen_nvm *gn = dev->mp;
>> @@ -230,6 +287,7 @@ static int gennvm_register(struct nvm_dev *dev)
>>
>> gn->dev = dev;
>> gn->nr_luns = dev->nr_luns;
>> + INIT_LIST_HEAD(&gn->area_list);
>> dev->mp = gn;
>>
>> ret = gennvm_luns_init(dev, gn);
>> @@ -466,6 +524,10 @@ static struct nvmm_type gennvm = {
>>
>> .get_lun = gennvm_get_lun,
>> .lun_info_print = gennvm_lun_info_print,
>> +
>> + .get_area = gennvm_get_area,
>> + .put_area = gennvm_put_area,
>> +
>> };
>>
>> static int __init gennvm_module_init(void)
>> diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
>> index 9c24b5b..04d7c23 100644
>> --- a/drivers/lightnvm/gennvm.h
>> +++ b/drivers/lightnvm/gennvm.h
>> @@ -39,8 +39,14 @@ struct gen_nvm {
>>
>> int nr_luns;
>> struct gen_lun *luns;
>> + struct list_head area_list;
>> };
>>
>> +struct gennvm_area {
>> + struct list_head list;
>> + sector_t begin;
>> + sector_t end; /* end is excluded */
>> +};
>> #define gennvm_for_each_lun(bm, lun, i) \
>> for ((i) = 0, lun = &(bm)->luns[0]; \
>> (i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
>> diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
>> index e2710da..20afe1c 100644
>> --- a/drivers/lightnvm/rrpc.c
>> +++ b/drivers/lightnvm/rrpc.c
>> @@ -1039,7 +1039,11 @@ static int rrpc_map_init(struct rrpc *rrpc)
>> {
>> struct nvm_dev *dev = rrpc->dev;
>> sector_t i;
>> - int ret;
>> + u64 slba;
>> + int ret, page_size;
>> +
>> + page_size = dev->sec_per_pg * dev->sec_size;
>> + slba = rrpc->soffset >> (ilog2(page_size) - 9);
>>
>> rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_pages);
>> if (!rrpc->trans_map)
>> @@ -1062,8 +1066,8 @@ static int rrpc_map_init(struct rrpc *rrpc)
>> return 0;
>>
>> /* Bring up the mapping table from device */
>> - ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
>> - rrpc_l2p_update, rrpc);
>> + ret = dev->ops->get_l2p_tbl(dev, slba, rrpc->nr_pages, rrpc_l2p_update,
>> + rrpc);
>
> In rrpc_luns_init, rrpc->nr_pages seems to be the target's sector
> number and previously dev->total_pages is used, dev->total_pages is
> the nvm device page number, so I am a little confusing here.
>
dev->total_pages is all pages on the media. rrpc->nr_pages is the
number of pages allocated to rrpc... which should be dev->total_pages
here, as we want to retrieve the full l2p table and then reconstruct
the state. Thanks. Feel free to send an updated patch with the changes.
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH v2 1/2] lightnvm: specify target's logical address area
2016-01-27 13:26 ` Matias Bjørling
@ 2016-01-27 14:58 ` Wenwei Tao
2016-01-27 19:46 ` Matias Bjørling
0 siblings, 1 reply; 15+ messages in thread
From: Wenwei Tao @ 2016-01-27 14:58 UTC (permalink / raw)
To: Matias Bjørling; +Cc: linux-kernel, linux-block
static int nvm_core_init(struct nvm_dev *dev)
{
...
dev->sec_per_pg = grp->fpg_sz / grp->csecs;
...
/* calculated values */
dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
...
}
static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
{
...
for (i = 0; i < rrpc->nr_luns; i++) {
...
rrpc->nr_pages += dev->sec_per_lun;
...
}
...
}
I would prefer rrpc->nr_pages to be the number of pages allocated to
rrpc, but the code above indeed makes me confused about the sector vs.
page distinction.
I hope I'm not misunderstanding the code.
ps: I'm not an expert on flash; if the confusion is caused by a lack of
knowledge about flash, please let me know.
2016-01-27 21:26 GMT+08:00 Matias Bjørling <mb@lightnvm.io>:
> On 01/27/2016 01:47 PM, Wenwei Tao wrote:
>> 2016-01-27 17:36 GMT+08:00 Matias Bjørling <mb@lightnvm.io>:
>>> On 01/27/2016 07:06 AM, Wenwei Tao wrote:
>>>> Thanks.
>>>>
>>>> 2016-01-27 13:52 GMT+08:00 Matias Bjørling <mb@lightnvm.io>:
>>>>> On 01/27/2016 03:21 AM, Wenwei Tao wrote:
>>>>>>
>>>>>> Yes, It's a spelling mistake, will correct it in next version.
>>>>>
>>>>>
>>>>> I can fix it in the version I apply. No problem.
>>>
>>> Hi Wenwei,
>>>
>>> I've changed it to this. Clean up the variables a bit. Is that ok with you?
>>>
>>> Thanks
>>>
>>> diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
>>> index 33224cb..27a59e8 100644
>>> --- a/drivers/lightnvm/core.c
>>> +++ b/drivers/lightnvm/core.c
>>> @@ -470,6 +470,7 @@ static int nvm_core_init(struct nvm_dev *dev)
>>> dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
>>> INIT_LIST_HEAD(&dev->online_targets);
>>> mutex_init(&dev->mlock);
>>> + spin_lock_init(&dev->lock);
>>>
>>> return 0;
>>> }
>>> diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
>>> index 7fb725b..6e2685d 100644
>>> --- a/drivers/lightnvm/gennvm.c
>>> +++ b/drivers/lightnvm/gennvm.c
>>> @@ -20,6 +20,63 @@
>>>
>>> #include "gennvm.h"
>>>
>>> +static int gennvm_get_area(struct nvm_dev *dev, sector_t *begin_sect,
>>> + sector_t size)
>>> +{
>>> + struct gen_nvm *gn = dev->mp;
>>> + struct gennvm_area *area, *prev;
>>> + int page_size = dev->sec_size * dev->sec_per_pg;
>>> + sector_t begin = 0;
>>> + sector_t max_sectors = (page_size * dev->total_pages) >> 9;
>>> +
>>> + if (size > max_sectors)
>>> + return -EINVAL;
>>> +
>>> + area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
>>> + if (!area)
>>> + return -ENOMEM;
>>> +
>>> + spin_lock(&dev->lock);
>>> + list_for_each_entry(prev, &gn->area_list, list) {
>>> + if (begin + size > prev->begin) {
>>> + begin = prev->end;
>>> + continue;
>>> + }
>>> + break;
>>> + }
>>> +
>>> + if ((begin + size) > max_sectors) {
>>> + spin_unlock(&dev->lock);
>>> + kfree(area);
>>> + return -EINVAL;
>>> + }
>>> +
>>> + area->begin = *begin_sect = begin;
>>> + area->end = begin + size;
>>> + list_add(&area->list, &prev->list);
>>
>> I think I have made a mistake here. Insert the new area after prev
>> will not make the list increase by area->begin. And prev is not
>> trustable
>> when out of the loop, it may point to list_entry((head)->next,
>> typeof(*pos), member). Below is changed code:
>>
>> +static int gennvm_get_area(struct nvm_dev *dev, sector_t *begin_sect,
>> + sector_t size)
>> +{
>> + struct gen_nvm *gn = dev->mp;
>> + struct gennvm_area *area, *prev, *next;
>> + sector_t begin = 0;
>> + int page_size = dev->sec_size * dev->sec_per_pg;
>> + sector_t max_sectors = (page_size * dev->total_pages) >> 9;
>> +
>> + if (size > max_sectors)
>> + return -EINVAL;
>> + area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
>> + if (!area)
>> + return -ENOMEM;
>> +
>> + prev = NULL;
>> +
>> + spin_lock(&dev->lock);
>> + list_for_each_entry(next, &gn->area_list, list) {
>> + if (begin + size > next->begin) {
>> + begin = next->end;
>> + prev = next;
>> + continue;
>> + }
>> + break;
>> + }
>> +
>> + if ((begin + size) > max_sectors) {
>> + spin_unlock(&dev->lock);
>> + kfree(area);
>> + return -EINVAL;
>> + }
>> +
>> + area->begin = *begin_sect = begin;
>> + area->end = begin + size;
>> + if (prev)
>> + list_add(&area->list, &prev->list);
>> + else
>> + list_add(&area->list, &gn->area_list);
>> + spin_unlock(&dev->lock);
>> + return 0;
>> +}
>>
>>
>>> + spin_unlock(&dev->lock);
>>> +
>>> + return 0;
>>> +}
>>> +
>>> +static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
>>> +{
>>> + struct gen_nvm *gn = dev->mp;
>>> + struct gennvm_area *area;
>>> +
>>> + spin_lock(&dev->lock);
>>> + list_for_each_entry(area, &gn->area_list, list) {
>>> + if (area->begin != begin)
>>> + continue;
>>> +
>>> + list_del(&area->list);
>>> + spin_unlock(&dev->lock);
>>> + kfree(area);
>>> + return;
>>> + }
>>> + spin_unlock(&dev->lock);
>>> +}
>>> +
>>> static void gennvm_blocks_free(struct nvm_dev *dev)
>>> {
>>> struct gen_nvm *gn = dev->mp;
>>> @@ -230,6 +287,7 @@ static int gennvm_register(struct nvm_dev *dev)
>>>
>>> gn->dev = dev;
>>> gn->nr_luns = dev->nr_luns;
>>> + INIT_LIST_HEAD(&gn->area_list);
>>> dev->mp = gn;
>>>
>>> ret = gennvm_luns_init(dev, gn);
>>> @@ -466,6 +524,10 @@ static struct nvmm_type gennvm = {
>>>
>>> .get_lun = gennvm_get_lun,
>>> .lun_info_print = gennvm_lun_info_print,
>>> +
>>> + .get_area = gennvm_get_area,
>>> + .put_area = gennvm_put_area,
>>> +
>>> };
>>>
>>> static int __init gennvm_module_init(void)
>>> diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
>>> index 9c24b5b..04d7c23 100644
>>> --- a/drivers/lightnvm/gennvm.h
>>> +++ b/drivers/lightnvm/gennvm.h
>>> @@ -39,8 +39,14 @@ struct gen_nvm {
>>>
>>> int nr_luns;
>>> struct gen_lun *luns;
>>> + struct list_head area_list;
>>> };
>>>
>>> +struct gennvm_area {
>>> + struct list_head list;
>>> + sector_t begin;
>>> + sector_t end; /* end is excluded */
>>> +};
>>> #define gennvm_for_each_lun(bm, lun, i) \
>>> for ((i) = 0, lun = &(bm)->luns[0]; \
>>> (i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
>>> diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
>>> index e2710da..20afe1c 100644
>>> --- a/drivers/lightnvm/rrpc.c
>>> +++ b/drivers/lightnvm/rrpc.c
>>> @@ -1039,7 +1039,11 @@ static int rrpc_map_init(struct rrpc *rrpc)
>>> {
>>> struct nvm_dev *dev = rrpc->dev;
>>> sector_t i;
>>> - int ret;
>>> + u64 slba;
>>> + int ret, page_size;
>>> +
>>> + page_size = dev->sec_per_pg * dev->sec_size;
>>> + slba = rrpc->soffset >> (ilog2(page_size) - 9);
>>>
>>> rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_pages);
>>> if (!rrpc->trans_map)
>>> @@ -1062,8 +1066,8 @@ static int rrpc_map_init(struct rrpc *rrpc)
>>> return 0;
>>>
>>> /* Bring up the mapping table from device */
>>> - ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
>>> - rrpc_l2p_update, rrpc);
>>> + ret = dev->ops->get_l2p_tbl(dev, slba, rrpc->nr_pages, rrpc_l2p_update,
>>> + rrpc);
>>
>> In rrpc_luns_init, rrpc->nr_pages seems to be the target's sector
>> number and previously dev->total_pages is used, dev->total_pages is
>> the nvm device page number, so I am a little confusing here.
>>
>
> The dev->total pages is all pages on media. The rrpc->nr_pages is the
> number of pages allocated to rrpc... which should be dev->total_pages
> here, as we want to retrieve the full l2p table, and then reconstruct
> the state. Thanks. Feel free to send an updated patch with the changes.
>
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH v2 1/2] lightnvm: specify target's logical address area
2016-01-27 14:58 ` Wenwei Tao
@ 2016-01-27 19:46 ` Matias Bjørling
0 siblings, 0 replies; 15+ messages in thread
From: Matias Bjørling @ 2016-01-27 19:46 UTC (permalink / raw)
To: Wenwei Tao; +Cc: linux-kernel, linux-block
On 01/27/2016 03:58 PM, Wenwei Tao wrote:
> static int nvm_core_init(struct nvm_dev *dev)
> {
>
> ...
> dev->sec_per_pg = grp->fpg_sz / grp->csecs;
> ...
> /* calculated values */
> dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
> dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
> dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
> ...
>
> }
>
> static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
> {
> ...
> for (i = 0; i < rrpc->nr_luns; i++) {
> ...
> rrpc->nr_pages += dev->sec_per_lun;
> ...
> }
> ...
> }
>
> I prefer rrpc->nr_pages to be the number of pages allocated to rrpc,
> but the code above indeed make me confuse about the sec and page
> thing.
> Hope I'm not misunderstand the code.
> ps: I'm not an expert on flash, if the confusion is caused by lack of
> knowledge about flash, pleas let me know.
rrpc->nr_pages should properly be rrpc->nr_sects, as it is indeed
confusing that it is called a page, when we actually use it for number
of sectors.
Pages refers to 4-64KB units, of which we have 1-16 sectors inside.
I have pushed an update to for-next that renames rrpc->nr_pages to
rrpc->nr_sects. That should hopefully help future readers.
I also updated dev->total_pages to total_secs.
Thanks
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH v2 2/2] lightnvm: add non-continuous lun target creation support
2016-01-27 9:44 ` Matias Bjørling
@ 2016-01-28 8:50 ` Wenwei Tao
2016-01-28 9:09 ` Matias Bjørling
0 siblings, 1 reply; 15+ messages in thread
From: Wenwei Tao @ 2016-01-28 8:50 UTC (permalink / raw)
To: Matias Bjørling; +Cc: linux-kernel, linux-block
2016-01-27 17:44 GMT+08:00 Matias Bjørling <mb@lightnvm.io>:
> On 01/26/2016 01:33 PM, Wenwei Tao wrote:
>> When creating a target, we specify the begin lunid and
>> the end lunid, and get the corresponding continuous
>> luns from media manager, if one of the luns is not free,
>> we fail to create the target, even if the device's
>> total free luns are enough.
>>
>> So add non-continuous lun target creation support,
>> thus we can improve the backend device's space utilization.
>> Signed-off-by: Wenwei Tao <ww.tao0320@gmail.com>
>> ---
>> Changes since v1:
>> -use NVM_FIXED instead NVM_C_FIXED in gennvm_get_lun
>> -add target creation flags check
>> -rebase to v4.5-rc1
>>
>> drivers/lightnvm/core.c | 36 ++++---
>> drivers/lightnvm/gennvm.c | 42 ++++++++-
>> drivers/lightnvm/rrpc.c | 215 +++++++++++++++++++++++++++---------------
>> drivers/lightnvm/rrpc.h | 6 +-
>> include/linux/lightnvm.h | 24 ++++-
>> include/uapi/linux/lightnvm.h | 3 +
>> 6 files changed, 229 insertions(+), 97 deletions(-)
>>
>
> Hi Wenwei,
>
> I did some digging on the patch and changed the interface to a
> reserve/release interface. I also removed the logic to dynamically
> select another lun than the one requested.
>
> A couple of questions:
>
> 1. The rrpc_lun->rev_lock and rev_trans_map change; this might be for
> another patch, and it isn't directly related to continuous mapping?
rrpc_lun->rev_lock and rev_trans_map change is related to
non-continuous mapping, it's not directly related to continuous
mapping.
Put this change in another patch along with non-continuous mapping
> support and this patch would only add the reserve/release thing, is
that your suggestion?
> 2. Instead of dynamically assigning new luns when not available, what
> about taking a list of lun ids instead?
>
Seems you prefer that the user make the choice?
But the target creation can still fail if one of the list lun ids is
not available although there may be enough free luns.
> I would only implement this in the lnvm ioctl interface. It would allow
> a list of lun ids to be passed through the lnvm ioctl interface. This
> way, the NVM_CONFIG_TYPE_SIMPLE can be extended with another
> NVM_CONFIG_TYPE_LIST, or similar, which then parses the ioctl
> appropriately. Would that be a better way to do it?
>
> Here is the diff. It is also rebased on top of the two latest patches
> that which are sent up for the next -rc.
>
> Thanks
>
> diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
> index 27a59e8..59a4bf9 100644
> --- a/drivers/lightnvm/core.c
> +++ b/drivers/lightnvm/core.c
> @@ -468,6 +468,11 @@ static int nvm_core_init(struct nvm_dev *dev)
> dev->luns_per_chnl *
> dev->nr_chnls;
> dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
> + dev->lun_map = kcalloc(BITS_TO_LONGS(dev->nr_luns),
> + sizeof(unsigned long), GFP_KERNEL);
> + if (!dev->lun_map)
> + return -ENOMEM;
> +
> INIT_LIST_HEAD(&dev->online_targets);
> mutex_init(&dev->mlock);
> spin_lock_init(&dev->lock);
> @@ -610,6 +615,7 @@ void nvm_unregister(char *disk_name)
> up_write(&nvm_lock);
>
> nvm_exit(dev);
> + kfree(dev->lun_map);
> kfree(dev);
> }
> EXPORT_SYMBOL(nvm_unregister);
> diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
> index 6e2685d..6419898 100644
> --- a/drivers/lightnvm/gennvm.c
> +++ b/drivers/lightnvm/gennvm.c
> @@ -188,6 +188,9 @@ static int gennvm_block_map(u64 slba, u32 nlb,
> __le64 *entries, void *private)
> lun_id = div_u64(pba, dev->sec_per_lun);
> lun = &gn->luns[lun_id];
>
> + if (!test_bit(lun_id, dev->lun_map))
> + __set_bit(lun_id, dev->lun_map);
> +
> /* Calculate block offset into lun */
> pba = pba - (dev->sec_per_lun * lun_id);
> blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];
> @@ -478,10 +481,23 @@ static int gennvm_erase_blk(struct nvm_dev *dev,
> struct nvm_block *blk,
> return nvm_erase_ppa(dev, &addr, 1);
> }
>
> +static int gennvm_reserve_lun(struct nvm_dev *dev, int lunid)
> +{
> + return test_and_set_bit(lunid, dev->lun_map);
> +}
> +
> +static void gennvm_release_lun(struct nvm_dev *dev, int lunid)
> +{
> + WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
> +}
> +
> static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
> {
> struct gen_nvm *gn = dev->mp;
>
> + if (unlikely(lunid >= dev->nr_luns))
> + return NULL;
> +
> return &gn->luns[lunid].vlun;
> }
>
> @@ -523,6 +539,8 @@ static struct nvmm_type gennvm = {
> .erase_blk = gennvm_erase_blk,
>
> .get_lun = gennvm_get_lun,
> + .reserve_lun = gennvm_reserve_lun,
> + .release_lun = gennvm_release_lun,
> .lun_info_print = gennvm_lun_info_print,
>
> .get_area = gennvm_get_area,
> diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
> index 20afe1c..0a99ebc 100644
> --- a/drivers/lightnvm/rrpc.c
> +++ b/drivers/lightnvm/rrpc.c
> @@ -26,25 +26,32 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct
> bio *bio,
> for ((i) = 0, rlun = &(rrpc)->luns[0]; \
> (i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
>
> +static inline u64 lun_poffset(struct nvm_dev *dev, struct nvm_lun *lun)
> +{
> + return lun->id * dev->sec_per_lun;
> +}
> +
> static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
> {
> struct rrpc_block *rblk = a->rblk;
> - unsigned int pg_offset;
> + struct rrpc_lun *rlun = rblk->rlun;
> + u64 pg_offset;
>
> - lockdep_assert_held(&rrpc->rev_lock);
> + lockdep_assert_held(&rlun->rev_lock);
>
> if (a->addr == ADDR_EMPTY || !rblk)
> return;
>
> spin_lock(&rblk->lock);
>
> - div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, &pg_offset);
> + div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, (u32 *)&pg_offset);
> WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
> rblk->nr_invalid_pages++;
>
> spin_unlock(&rblk->lock);
>
> - rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
> + pg_offset = lun_poffset(rrpc->dev, rlun->parent);
> + rlun->rev_trans_map[a->addr - pg_offset].addr = ADDR_EMPTY;
> }
>
> static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
> @@ -52,14 +59,15 @@ static void rrpc_invalidate_range(struct rrpc *rrpc,
> sector_t slba,
> {
> sector_t i;
>
> - spin_lock(&rrpc->rev_lock);
> for (i = slba; i < slba + len; i++) {
> struct rrpc_addr *gp = &rrpc->trans_map[i];
> + struct rrpc_lun *rlun = gp->rblk->rlun;
>
> + spin_lock(&rlun->rev_lock);
> rrpc_page_invalidate(rrpc, gp);
> + spin_unlock(&rlun->rev_lock);
> gp->rblk = NULL;
> }
> - spin_unlock(&rrpc->rev_lock);
> }
>
> static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
> @@ -281,13 +289,14 @@ static void rrpc_end_sync_bio(struct bio *bio)
> static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block
> *rblk)
> {
> struct request_queue *q = rrpc->dev->q;
> + struct rrpc_lun *rlun = rblk->rlun;
> struct rrpc_rev_addr *rev;
> struct nvm_rq *rqd;
> struct bio *bio;
> struct page *page;
> int slot;
> int nr_pgs_per_blk = rrpc->dev->pgs_per_blk;
> - u64 phys_addr;
> + u64 phys_addr, poffset;
> DECLARE_COMPLETION_ONSTACK(wait);
>
> if (bitmap_full(rblk->invalid_pages, nr_pgs_per_blk))
> @@ -303,6 +312,7 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc,
> struct rrpc_block *rblk)
> if (!page)
> return -ENOMEM;
>
> + poffset = lun_poffset(rrpc->dev, rlun->parent);
> while ((slot = find_first_zero_bit(rblk->invalid_pages,
> nr_pgs_per_blk)) < nr_pgs_per_blk) {
>
> @@ -310,23 +320,23 @@ static int rrpc_move_valid_pages(struct rrpc
> *rrpc, struct rrpc_block *rblk)
> phys_addr = (rblk->parent->id * nr_pgs_per_blk) + slot;
>
> try:
> - spin_lock(&rrpc->rev_lock);
> + spin_lock(&rlun->rev_lock);
> /* Get logical address from physical to logical table */
> - rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
> + rev = &rlun->rev_trans_map[phys_addr - poffset];
> /* already updated by previous regular write */
> if (rev->addr == ADDR_EMPTY) {
> - spin_unlock(&rrpc->rev_lock);
> + spin_unlock(&rlun->rev_lock);
> continue;
> }
>
> rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
> if (IS_ERR_OR_NULL(rqd)) {
> - spin_unlock(&rrpc->rev_lock);
> + spin_unlock(&rlun->rev_lock);
> schedule();
> goto try;
> }
>
> - spin_unlock(&rrpc->rev_lock);
> + spin_unlock(&rlun->rev_lock);
>
> /* Perform read to do GC */
> bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
> @@ -395,7 +405,7 @@ static void rrpc_block_gc(struct work_struct *work)
> struct rrpc_block *rblk = gcb->rblk;
> struct nvm_dev *dev = rrpc->dev;
> struct nvm_lun *lun = rblk->parent->lun;
> - struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
> + struct rrpc_lun *rlun = lun->private;
>
> mempool_free(gcb, rrpc->gcb_pool);
> pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
> @@ -496,9 +506,9 @@ static void rrpc_gc_queue(struct work_struct *work)
> ws_gc);
> struct rrpc *rrpc = gcb->rrpc;
> struct rrpc_block *rblk = gcb->rblk;
> - struct nvm_lun *lun = rblk->parent->lun;
> struct nvm_block *blk = rblk->parent;
> - struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
> + struct nvm_lun *lun = blk->lun;
> + struct rrpc_lun *rlun = lun->private;
>
> spin_lock(&rlun->lock);
> list_add_tail(&rblk->prio, &rlun->prio_list);
> @@ -549,22 +559,24 @@ static struct rrpc_lun *rrpc_get_lun_rr(struct
> rrpc *rrpc, int is_gc)
> static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
> struct rrpc_block *rblk, u64 paddr)
> {
> + struct rrpc_lun *rlun = rblk->rlun;
> struct rrpc_addr *gp;
> struct rrpc_rev_addr *rev;
> + u64 poffset = lun_poffset(rrpc->dev, rlun->parent);
>
> BUG_ON(laddr >= rrpc->nr_pages);
>
> gp = &rrpc->trans_map[laddr];
> - spin_lock(&rrpc->rev_lock);
> + spin_lock(&rlun->rev_lock);
> if (gp->rblk)
> rrpc_page_invalidate(rrpc, gp);
>
> gp->addr = paddr;
> gp->rblk = rblk;
>
> - rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
> + rev = &rlun->rev_trans_map[gp->addr - poffset];
> rev->addr = laddr;
> - spin_unlock(&rrpc->rev_lock);
> + spin_unlock(&rlun->rev_lock);
>
> return gp;
> }
> @@ -953,8 +965,6 @@ static void rrpc_requeue(struct work_struct *work)
>
> static void rrpc_gc_free(struct rrpc *rrpc)
> {
> - struct rrpc_lun *rlun;
> - int i;
>
> if (rrpc->krqd_wq)
> destroy_workqueue(rrpc->krqd_wq);
> @@ -962,16 +972,6 @@ static void rrpc_gc_free(struct rrpc *rrpc)
> if (rrpc->kgc_wq)
> destroy_workqueue(rrpc->kgc_wq);
>
> - if (!rrpc->luns)
> - return;
> -
> - for (i = 0; i < rrpc->nr_luns; i++) {
> - rlun = &rrpc->luns[i];
> -
> - if (!rlun->blocks)
> - break;
> - vfree(rlun->blocks);
> - }
> }
>
> static int rrpc_gc_init(struct rrpc *rrpc)
> @@ -992,7 +992,6 @@ static int rrpc_gc_init(struct rrpc *rrpc)
>
> static void rrpc_map_free(struct rrpc *rrpc)
> {
> - vfree(rrpc->rev_trans_map);
> vfree(rrpc->trans_map);
> }
>
> @@ -1000,19 +999,28 @@ static int rrpc_l2p_update(u64 slba, u32 nlb,
> __le64 *entries, void *private)
> {
> struct rrpc *rrpc = (struct rrpc *)private;
> struct nvm_dev *dev = rrpc->dev;
> - struct rrpc_addr *addr = rrpc->trans_map + slba;
> - struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
> + struct rrpc_addr *addr;
> + struct rrpc_rev_addr *raddr;
> sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
> - u64 elba = slba + nlb;
> - u64 i;
> + int page_size = dev->sec_per_pg * dev->sec_size;
> + u64 elba, i;
> +
> + elba = slba + nlb;
>
> if (unlikely(elba > dev->total_pages)) {
> pr_err("nvm: L2P data from device is out of bounds!\n");
> return -EINVAL;
> }
>
> + slba -= rrpc->soffset >> (ilog2(page_size) - 9);
> + addr = rrpc->trans_map + slba;
> for (i = 0; i < nlb; i++) {
> + struct rrpc_lun *rlun;
> + struct nvm_lun *lun;
> u64 pba = le64_to_cpu(entries[i]);
> + u64 poffset;
> + int lunid;
> +
> /* LNVM treats address-spaces as silos, LBA and PBA are
> * equally large and zero-indexed.
> */
> @@ -1028,8 +1036,15 @@ static int rrpc_l2p_update(u64 slba, u32 nlb,
> __le64 *entries, void *private)
> if (!pba)
> continue;
>
> + lunid = div_u64(pba, dev->sec_per_lun);
> + lun = dev->mt->get_lun(dev, lunid);
> + if (unlikely(!lun))
> + return -EINVAL;
> + rlun = lun->private;
> + raddr = rlun->rev_trans_map;
> + poffset = lun_poffset(dev, lun);
> addr[i].addr = pba;
> - raddr[pba].addr = slba + i;
> + raddr[pba - poffset].addr = slba + i;
> }
>
> return 0;
> @@ -1049,17 +1064,11 @@ static int rrpc_map_init(struct rrpc *rrpc)
> if (!rrpc->trans_map)
> return -ENOMEM;
>
> - rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
> - * rrpc->nr_pages);
> - if (!rrpc->rev_trans_map)
> - return -ENOMEM;
>
> for (i = 0; i < rrpc->nr_pages; i++) {
> struct rrpc_addr *p = &rrpc->trans_map[i];
> - struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];
>
> p->addr = ADDR_EMPTY;
> - r->addr = ADDR_EMPTY;
> }
>
> if (!dev->ops->get_l2p_tbl)
> @@ -1130,22 +1139,86 @@ static void rrpc_core_free(struct rrpc *rrpc)
>
> static void rrpc_luns_free(struct rrpc *rrpc)
> {
> + struct nvm_dev *dev = rrpc->dev;
> + struct rrpc_lun *rlun;
> + struct nvm_lun *lun;
> + int i;
> +
> + if (!rrpc->luns)
> + return;
> +
> + for (i = 0; i < rrpc->nr_luns; i++) {
> + rlun = &rrpc->luns[i];
> + if (!rlun)
> + break;
> + lun = rlun->parent;
> + dev->mt->release_lun(dev, lun->id);
> + vfree(rlun->rev_trans_map);
> + vfree(rlun->blocks);
> + }
> kfree(rrpc->luns);
> + rrpc->luns = NULL;
> +
> +}
> +
> +static int rrpc_lun_init(struct rrpc *rrpc, struct rrpc_lun *rlun,
> + struct nvm_lun *lun)
> +{
> + struct nvm_dev *dev = rrpc->dev;
> + int i;
> +
> + rlun->rrpc = rrpc;
> + rlun->parent = lun;
> +
> + rlun->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr) *
> + dev->sec_per_lun);
> + if (!rlun->rev_trans_map)
> + return -ENOMEM;
> +
> + for (i = 0; i < dev->sec_per_lun; i++) {
> + struct rrpc_rev_addr *r = &rlun->rev_trans_map[i];
> +
> + r->addr = ADDR_EMPTY;
> + }
> +
> + rlun->blocks = vzalloc(sizeof(struct rrpc_block) * dev->blks_per_lun);
> + if (!rlun->blocks) {
> + vfree(rlun->rev_trans_map);
> + return -ENOMEM;
> + }
> +
> + for (i = 0; i < dev->blks_per_lun; i++) {
> + struct rrpc_block *rblk = &rlun->blocks[i];
> + struct nvm_block *blk = &lun->blocks[i];
> +
> + rblk->parent = blk;
> + rblk->rlun = rlun;
> + INIT_LIST_HEAD(&rblk->prio);
> + spin_lock_init(&rblk->lock);
> + }
> +
> + lun->private = rlun;
> + INIT_LIST_HEAD(&rlun->prio_list);
> + INIT_LIST_HEAD(&rlun->open_list);
> + INIT_LIST_HEAD(&rlun->closed_list);
> + INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
> + spin_lock_init(&rlun->lock);
> + spin_lock_init(&rlun->rev_lock);
> +
> + return 0;
> }
>
> static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
> {
> struct nvm_dev *dev = rrpc->dev;
> struct rrpc_lun *rlun;
> - int i, j;
> + int i, ret;
>
> if (dev->pgs_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
> pr_err("rrpc: number of pages per block too high.");
> return -EINVAL;
> }
>
> - spin_lock_init(&rrpc->rev_lock);
> -
> rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
> GFP_KERNEL);
> if (!rrpc->luns)
> @@ -1153,40 +1226,35 @@ static int rrpc_luns_init(struct rrpc *rrpc, int
> lun_begin, int lun_end)
>
> /* 1:1 mapping */
> for (i = 0; i < rrpc->nr_luns; i++) {
> - struct nvm_lun *lun = dev->mt->get_lun(dev, lun_begin + i);
> + int lunid = lun_begin + i;
> + struct nvm_lun *lun;
> +
> + if (dev->mt->reserve_lun(dev, lunid)) {
> + pr_err("rrpc: lun %u is already allocated\n", lunid);
> + ret = -EINVAL;
> + goto err;
> + }
> +
> + lun = dev->mt->get_lun(dev, lunid);
> + if (!lun) {
> + ret = -EINVAL;
> + goto err;
> + }
>
> rlun = &rrpc->luns[i];
> - rlun->rrpc = rrpc;
> - rlun->parent = lun;
> - INIT_LIST_HEAD(&rlun->prio_list);
> - INIT_LIST_HEAD(&rlun->open_list);
> - INIT_LIST_HEAD(&rlun->closed_list);
> -
> - INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
> - spin_lock_init(&rlun->lock);
> + ret = rrpc_lun_init(rrpc, rlun, lun);
> + if (ret)
> + goto err;
>
> rrpc->total_blocks += dev->blks_per_lun;
> rrpc->nr_pages += dev->sec_per_lun;
> -
> - rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
> - rrpc->dev->blks_per_lun);
> - if (!rlun->blocks)
> - goto err;
> -
> - for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
> - struct rrpc_block *rblk = &rlun->blocks[j];
> - struct nvm_block *blk = &lun->blocks[j];
> -
> - rblk->parent = blk;
> - rblk->rlun = rlun;
> - INIT_LIST_HEAD(&rblk->prio);
> - spin_lock_init(&rblk->lock);
> - }
> }
>
> return 0;
> err:
> - return -ENOMEM;
> + rrpc_luns_free(rrpc);
> + return ret;
> +
> }
>
> /* returns 0 on success and stores the beginning address in *begin */
> @@ -1258,14 +1326,16 @@ static sector_t rrpc_capacity(void *private)
> static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block
> *rblk)
> {
> struct nvm_dev *dev = rrpc->dev;
> + struct rrpc_lun *rlun = rblk->rlun;
> int offset;
> struct rrpc_addr *laddr;
> - u64 paddr, pladdr;
> + u64 paddr, pladdr, poffset;
>
> + poffset = lun_poffset(dev, rlun->parent);
> for (offset = 0; offset < dev->pgs_per_blk; offset++) {
> paddr = block_to_addr(rrpc, rblk) + offset;
>
> - pladdr = rrpc->rev_trans_map[paddr].addr;
> + pladdr = rlun->rev_trans_map[paddr - poffset].addr;
> if (pladdr == ADDR_EMPTY)
> continue;
>
> @@ -1374,9 +1444,6 @@ static void *rrpc_init(struct nvm_dev *dev, struct
> gendisk *tdisk,
> goto err;
> }
>
> - rrpc->poffset = dev->sec_per_lun * lun_begin;
> - rrpc->lun_offset = lun_begin;
> -
> ret = rrpc_core_init(rrpc);
> if (ret) {
> pr_err("nvm: rrpc: could not initialize core\n");
> diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
> index 9380c68..4d756d8 100644
> --- a/drivers/lightnvm/rrpc.h
> +++ b/drivers/lightnvm/rrpc.h
> @@ -86,6 +86,9 @@ struct rrpc_lun {
> */
>
> struct work_struct ws_gc;
> + /* store a reverse map for garbage collection */
> + struct rrpc_rev_addr *rev_trans_map;
> + spinlock_t rev_lock;
>
> spinlock_t lock;
> };
> @@ -124,9 +127,6 @@ struct rrpc {
> * addresses are used when writing to the disk block device.
> */
> struct rrpc_addr *trans_map;
> - /* also store a reverse map for garbage collection */
> - struct rrpc_rev_addr *rev_trans_map;
> - spinlock_t rev_lock;
>
> struct rrpc_inflight inflights;
>
> diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
> index 18f1bb0..a33af4f 100644
> --- a/include/linux/lightnvm.h
> +++ b/include/linux/lightnvm.h
> @@ -271,6 +271,7 @@ struct nvm_lun {
> spinlock_t lock;
>
> struct nvm_block *blocks;
> + void *private;
> };
>
> enum {
> @@ -342,6 +343,8 @@ struct nvm_dev {
> int nr_luns;
> unsigned max_pages_per_blk;
>
> + unsigned long *lun_map;
> +
> void *ppalist_pool;
>
> struct nvm_id identity;
> @@ -462,6 +465,8 @@ typedef int (nvmm_submit_io_fn)(struct nvm_dev *,
> struct nvm_rq *);
> typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
> unsigned long);
> typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
> +typedef int (nvmm_reserve_lun(struct nvm_dev *, int));
> +typedef void (nvmm_release_lun(struct nvm_dev *, int));
> typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
>
> typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
> @@ -488,6 +493,8 @@ struct nvmm_type {
>
> /* Configuration management */
> nvmm_get_lun_fn *get_lun;
> + nvmm_reserve_lun *reserve_lun;
> + nvmm_release_lun *release_lun;
>
> /* Statistics */
> nvmm_lun_info_print_fn *lun_info_print;
>
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH v2 2/2] lightnvm: add non-continuous lun target creation support
2016-01-28 8:50 ` Wenwei Tao
@ 2016-01-28 9:09 ` Matias Bjørling
2016-01-28 10:19 ` Wenwei Tao
0 siblings, 1 reply; 15+ messages in thread
From: Matias Bjørling @ 2016-01-28 9:09 UTC (permalink / raw)
To: Wenwei Tao; +Cc: linux-kernel, linux-block
On 01/28/2016 09:50 AM, Wenwei Tao wrote:
> 2016-01-27 17:44 GMT+08:00 Matias Bjørling <mb@lightnvm.io>:
>> On 01/26/2016 01:33 PM, Wenwei Tao wrote:
>>> When create a target, we specify the begin lunid and
>>> the end lunid, and get the corresponding continuous
>>> luns from media manager, if one of the luns is not free,
>>> we failed to create the target, even if the device's
>>> total free luns are enough.
>>>
>>> So add non-continuous lun target creation support,
>>> thus we can improve the backend device's space utilization.
>>> Signed-off-by: Wenwei Tao <ww.tao0320@gmail.com>
>>> ---
>>> Changes since v1:
>>> -use NVM_FIXED instead NVM_C_FIXED in gennvm_get_lun
>>> -add target creation flags check
>>> -rebase to v4.5-rc1
>>>
>>> drivers/lightnvm/core.c | 36 ++++---
>>> drivers/lightnvm/gennvm.c | 42 ++++++++-
>>> drivers/lightnvm/rrpc.c | 215 +++++++++++++++++++++++++++---------------
>>> drivers/lightnvm/rrpc.h | 6 +-
>>> include/linux/lightnvm.h | 24 ++++-
>>> include/uapi/linux/lightnvm.h | 3 +
>>> 6 files changed, 229 insertions(+), 97 deletions(-)
>>>
>>
>> Hi Wenwei,
>>
>> I did some digging on the patch and changed the interface to a
>> reserve/release interface. I also removed the logic to dynamically
>> select another lun than the one requested.
>>
>> A couple of questions:
>>
>> 1. The rrpc_lun->rev_lock and rev_trans_map change; this might be for
>> another patch, and it isn't directly related to continuous mapping?
>
> rrpc_lun->rev_lock and rev_trans_map change is related to
> non-continuous mapping, it's not directly related to continuous
> mapping.
> Put this change in another patch along with non-continuous mapping
> support and this patch would be only add reserve/release thing, is
> that your suggestion?
Yes, that would be great. Then we keep it separate. I'll like to do some
benchmarks with the patch on and off, to see the performance difference.
>
>> 2. Instead of dynamically assigning new luns when not available, what
>> about taking a list of lun ids instead?
>>
>
> Seems you prefer user make the choice ?
Yes, I want it to be deterministic. For example, if we do it
dynamically, the user might first allocate 2-4, and then allocate 1-3 ,
which will actually allocate 0,1,5. Then later, a user tries to allocate
on 0, and instead gets returned 6. It quickly makes it difficult to use.
> But the target creation can still fail if one of the list lun ids is
> not available although there may be enough free luns.
Agree, the user would have to look up the free luns and then resubmit
the target allocation.
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH v2 2/2] lightnvm: add non-continuous lun target creation support
2016-01-28 9:09 ` Matias Bjørling
@ 2016-01-28 10:19 ` Wenwei Tao
0 siblings, 0 replies; 15+ messages in thread
From: Wenwei Tao @ 2016-01-28 10:19 UTC (permalink / raw)
To: Matias Bjørling; +Cc: linux-kernel, linux-block
OK, I see. Will include these changes in the next version.
2016-01-28 17:09 GMT+08:00 Matias Bjørling <mb@lightnvm.io>:
> On 01/28/2016 09:50 AM, Wenwei Tao wrote:
>> 2016-01-27 17:44 GMT+08:00 Matias Bjørling <mb@lightnvm.io>:
>>> On 01/26/2016 01:33 PM, Wenwei Tao wrote:
>>>> When create a target, we specify the begin lunid and
>>>> the end lunid, and get the corresponding continuous
>>>> luns from media manager, if one of the luns is not free,
>>>> we failed to create the target, even if the device's
>>>> total free luns are enough.
>>>>
>>>> So add non-continuous lun target creation support,
>>>> thus we can improve the backend device's space utilization.
>>>> Signed-off-by: Wenwei Tao <ww.tao0320@gmail.com>
>>>> ---
>>>> Changes since v1:
>>>> -use NVM_FIXED instead NVM_C_FIXED in gennvm_get_lun
>>>> -add target creation flags check
>>>> -rebase to v4.5-rc1
>>>>
>>>> drivers/lightnvm/core.c | 36 ++++---
>>>> drivers/lightnvm/gennvm.c | 42 ++++++++-
>>>> drivers/lightnvm/rrpc.c | 215 +++++++++++++++++++++++++++---------------
>>>> drivers/lightnvm/rrpc.h | 6 +-
>>>> include/linux/lightnvm.h | 24 ++++-
>>>> include/uapi/linux/lightnvm.h | 3 +
>>>> 6 files changed, 229 insertions(+), 97 deletions(-)
>>>>
>>>
>>> Hi Wenwei,
>>>
>>> I did some digging on the patch and changed the interface to a
>>> reserve/release interface. I also removed the logic to dynamically
>>> select another lun than the one requested.
>>>
>>> A couple of questions:
>>>
>>> 1. The rrpc_lun->rev_lock and rev_trans_map change; this might be for
>>> another patch, and it isn't directly related to continuous mapping?
>>
>> rrpc_lun->rev_lock and rev_trans_map change is related to
>> non-continuous mapping, it's not directly related to continuous
>> mapping.
>> Put this change in another patch along with non-continuous mapping
>> support and this patch would be only add reserve/release thing, is
>> that your suggestion?
>
> Yes, that would be great. Then we keep it separate. I'll like to do some
> benchmarks with the patch on and off, to see the performance difference.
>
>>
>>> 2. Instead of dynamically assigning new luns when not available, what
>>> about taking a list of lun ids instead?
>>>
>>
>> Seems you prefer user make the choice ?
>
> Yes, I want it to be deterministic. For example, if we do it
> dynamically, the user might first allocate 2-4, and then allocate 1-3 ,
> which will actually allocate 0,1,5. Then later, a user tries to allocate
> on 0, and instead gets returned 6. It quickly makes it difficult to use.
>
>> But the target creation can still fail if one of the list lun ids is
>> not available although there may be enough free luns.
>
> Agree, the user would have to look up the free luns and then resubmit
> the target allocation.
^ permalink raw reply [flat|nested] 15+ messages in thread
end of thread, other threads:[~2016-01-28 10:19 UTC | newest]
Thread overview: 15+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2016-01-26 12:33 [PATCH v2 1/2] lightnvm: specify target's logical address area Wenwei Tao
2016-01-26 12:33 ` [PATCH v2 2/2] lightnvm: add non-continuous lun target creation support Wenwei Tao
2016-01-27 9:44 ` Matias Bjørling
2016-01-28 8:50 ` Wenwei Tao
2016-01-28 9:09 ` Matias Bjørling
2016-01-28 10:19 ` Wenwei Tao
2016-01-26 14:52 ` [PATCH v2 1/2] lightnvm: specify target's logical address area Matias Bjørling
2016-01-27 2:21 ` Wenwei Tao
2016-01-27 5:52 ` Matias Bjørling
2016-01-27 6:06 ` Wenwei Tao
2016-01-27 9:36 ` Matias Bjørling
2016-01-27 12:47 ` Wenwei Tao
2016-01-27 13:26 ` Matias Bjørling
2016-01-27 14:58 ` Wenwei Tao
2016-01-27 19:46 ` Matias Bjørling
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox