From mboxrd@z Thu Jan 1 00:00:00 1970 From: =?UTF-8?q?Matias=20Bj=C3=B8rling?= Subject: [PATCH 4/5 v2] null_blk: LightNVM support Date: Wed, 15 Apr 2015 14:34:43 +0200 Message-ID: <1429101284-19490-5-git-send-email-m@bjorling.me> References: <1429101284-19490-1-git-send-email-m@bjorling.me> Mime-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: QUOTED-PRINTABLE Cc: javier@paletta.io, keith.busch@intel.com, =?UTF-8?q?Matias=20Bj=C3=B8rling?= To: hch@infradead.org, axboe@fb.com, linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org, linux-nvme@lists.infradead.org Return-path: In-Reply-To: <1429101284-19490-1-git-send-email-m@bjorling.me> Sender: linux-kernel-owner@vger.kernel.org List-Id: linux-fsdevel.vger.kernel.org Initial support for LightNVM. The support can be used to benchmark performance of targets and core implementation. Signed-off-by: Matias Bj=C3=B8rling --- Documentation/block/null_blk.txt | 8 ++++ drivers/block/null_blk.c | 89 ++++++++++++++++++++++++++++++++= +++++--- 2 files changed, 92 insertions(+), 5 deletions(-) diff --git a/Documentation/block/null_blk.txt b/Documentation/block/nul= l_blk.txt index 2f6c6ff..b907ecc 100644 --- a/Documentation/block/null_blk.txt +++ b/Documentation/block/null_blk.txt @@ -70,3 +70,11 @@ use_per_node_hctx=3D[0/1]: Default: 0 parameter. 1: The multi-queue block layer is instantiated with a hardware dispa= tch queue for each CPU node in the system. + +IV: LightNVM specific parameters + +nvm_enable=3D[x]: Default: 0 + Enable LightNVM for null block devices. Requires blk-mq to be used. + +nvm_num_channels=3D[x]: Default: 1 + Number of LightNVM channels that are exposed to the LightNVM driver. 
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 65cd61a..9cf566e 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -7,6 +7,7 @@ #include #include #include +#include #include =20 struct nullb_cmd { @@ -147,6 +148,14 @@ static bool use_per_node_hctx =3D false; module_param(use_per_node_hctx, bool, S_IRUGO); MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardw= are context queues. Default: false"); =20 +static bool nvm_enable; +module_param(nvm_enable, bool, S_IRUGO); +MODULE_PARM_DESC(nvm_enable, "Enable Open-channel SSD. Default: false"= ); + +static int nvm_num_channels =3D 1; +module_param(nvm_num_channels, int, S_IRUGO); +MODULE_PARM_DESC(nvm_num_channels, "Number of channels to be exposed f= rom the Open-Channel SSD. Default: 1"); + static void put_tag(struct nullb_queue *nq, unsigned int tag) { clear_bit_unlock(tag, nq->tag_map); @@ -351,6 +360,50 @@ static void null_request_fn(struct request_queue *= q) } } =20 +static int null_nvm_id(struct request_queue *q, struct nvm_id *id) +{ + sector_t size =3D gb * 1024 * 1024 * 1024ULL; + unsigned long per_chnl_size =3D + size / bs / nvm_num_channels; + struct nvm_id_chnl *chnl; + int i; + + id->ver_id =3D 0x1; + id->nvm_type =3D NVM_NVMT_BLK; + id->nchannels =3D nvm_num_channels; + + id->chnls =3D kmalloc_array(id->nchannels, sizeof(struct nvm_id_chnl)= , + GFP_KERNEL); + if (!id->chnls) + return -ENOMEM; + + for (i =3D 0; i < id->nchannels; i++) { + chnl =3D &id->chnls[i]; + chnl->queue_size =3D hw_queue_depth; + chnl->gran_read =3D bs; + chnl->gran_write =3D bs; + chnl->gran_erase =3D bs * 256; + chnl->oob_size =3D 0; + chnl->t_r =3D chnl->t_sqr =3D 25000; /* 25us */ + chnl->t_w =3D chnl->t_sqw =3D 500000; /* 500us */ + chnl->t_e =3D 1500000; /* 1.500us */ + chnl->io_sched =3D NVM_IOSCHED_CHANNEL; + chnl->laddr_begin =3D per_chnl_size * i; + chnl->laddr_end =3D per_chnl_size * (i + 1) - 1; + } + + return 0; +} + +static int null_nvm_get_features(struct 
request_queue *q, + struct nvm_get_features *gf) +{ + gf->rsp =3D 0; + gf->ext =3D 0; + + return 0; +} + static int null_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { @@ -387,6 +440,11 @@ static int null_init_hctx(struct blk_mq_hw_ctx *hc= tx, void *data, return 0; } =20 +static struct nvm_dev_ops null_nvm_dev_ops =3D { + .identify =3D null_nvm_id, + .get_features =3D null_nvm_get_features, +}; + static struct blk_mq_ops null_mq_ops =3D { .queue_rq =3D null_queue_rq, .map_queue =3D blk_mq_map_queue, @@ -525,6 +583,17 @@ static int null_add_dev(void) nullb->tag_set.flags =3D BLK_MQ_F_SHOULD_MERGE; nullb->tag_set.driver_data =3D nullb; =20 + if (nvm_enable) { + nullb->tag_set.flags &=3D ~BLK_MQ_F_SHOULD_MERGE; + nullb->tag_set.flags |=3D BLK_MQ_F_NVM; + + if (bs !=3D 4096) { + pr_warn("null_blk: only 4K block is supported for Open-Channel SSD= s. bs is set to 4K.\n"); + bs =3D 4096; + } + + } + rv =3D blk_mq_alloc_tag_set(&nullb->tag_set); if (rv) goto out_cleanup_queues; @@ -567,11 +636,6 @@ static int null_add_dev(void) goto out_cleanup_blk_queue; } =20 - mutex_lock(&lock); - list_add_tail(&nullb->list, &nullb_list); - nullb->index =3D nullb_indexes++; - mutex_unlock(&lock); - blk_queue_logical_block_size(nullb->q, bs); blk_queue_physical_block_size(nullb->q, bs); =20 @@ -579,16 +643,31 @@ static int null_add_dev(void) sector_div(size, bs); set_capacity(disk, size); =20 + mutex_lock(&lock); + nullb->index =3D nullb_indexes++; + list_add_tail(&nullb->list, &nullb_list); + mutex_unlock(&lock); + disk->flags |=3D GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO= ; disk->major =3D null_major; disk->first_minor =3D nullb->index; disk->fops =3D &null_fops; disk->private_data =3D nullb; disk->queue =3D nullb->q; + + if (nvm_enable && queue_mode =3D=3D NULL_Q_MQ) { + if (blk_nvm_register(nullb->q, &null_nvm_dev_ops)) + goto out_cleanup_nvm; + + nullb->q->nvm->drv_cmd_size =3D sizeof(struct nullb_cmd); + } + sprintf(disk->disk_name, 
"nullb%d", nullb->index); add_disk(disk); return 0; =20 +out_cleanup_nvm: + put_disk(disk); out_cleanup_blk_queue: blk_cleanup_queue(nullb->q); out_cleanup_tags: --=20 1.9.1