linux-scsi.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: "Nicholas A. Bellinger" <nab@linux-iscsi.org>
To: target-devel <target-devel@vger.kernel.org>
Cc: linux-nvme <linux-nvme@lists.infradead.org>,
	linux-scsi <linux-scsi@vger.kernel.org>,
	Jens Axboe <axboe@fb.com>, Christoph Hellwig <hch@lst.de>,
	Martin Petersen <martin.petersen@oracle.com>,
	Sagi Grimberg <sagi@grimberg.me>, Hannes Reinecke <hare@suse.de>,
	Mike Christie <michaelc@cs.wisc.edu>,
	Dave B Minturn <dave.b.minturn@intel.com>,
	Nicholas Bellinger <nab@linux-iscsi.org>
Subject: [RFC 6/8] nvmet/io-cmd: Hookup sbc_ops->execute_unmap backend ops
Date: Tue,  7 Jun 2016 06:36:54 +0000	[thread overview]
Message-ID: <1465281416-28355-7-git-send-email-nab@linux-iscsi.org> (raw)
In-Reply-To: <1465281416-28355-1-git-send-email-nab@linux-iscsi.org>

From: Nicholas Bellinger <nab@linux-iscsi.org>

This patch converts nvmet_execute_discard() to utilize
sbc_ops->execute_unmap() for target_iostate submission
into existing backend drivers via configfs in
/sys/kernel/config/target/core/.

Cc: Jens Axboe <axboe@fb.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Martin Petersen <martin.petersen@oracle.com>
Cc: Sagi Grimberg <sagi@grimberg.me>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 drivers/nvme/target/io-cmd.c | 47 ++++++++++++++++++++++++++++++++------------
 1 file changed, 34 insertions(+), 13 deletions(-)

diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
index 23905a8..605f560 100644
--- a/drivers/nvme/target/io-cmd.c
+++ b/drivers/nvme/target/io-cmd.c
@@ -126,52 +126,73 @@ static void nvmet_execute_flush(struct nvmet_req *req)
 	rc = sbc_ops->execute_sync_cache(ios, false);
 }
 
-#if 0
-static u16 nvmet_discard_range(struct nvmet_ns *ns,
-		struct nvme_dsm_range *range, int type, struct bio **bio)
+static u16 nvmet_discard_range(struct nvmet_req *req, struct sbc_ops *sbc_ops,
+		struct nvme_dsm_range *range, struct bio **bio)
 {
-	if (__blkdev_issue_discard(ns->bdev,
+	struct nvmet_ns *ns = req->ns;
+	sense_reason_t rc;
+
+	rc = sbc_ops->execute_unmap(&req->t_iostate,
 			le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
 			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
-			GFP_KERNEL, type, bio))
+			bio);
+	if (rc)
 		return NVME_SC_INTERNAL | NVME_SC_DNR;
 
 	return 0;
 }
-#endif
+
+static void nvmet_discard_bio_done(struct bio *bio)
+{
+	struct nvmet_req *req = bio->bi_private;
+	int err = bio->bi_error;
+
+	bio_put(bio);
+	nvmet_req_complete(req, err ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
+}
 
 static void nvmet_execute_discard(struct nvmet_req *req)
 {
-#if 0
-	struct nvme_dsm_range range;
+	struct target_iostate *ios = &req->t_iostate;
+	struct se_device *dev = rcu_dereference_raw(req->ns->dev);
+	struct sbc_ops *sbc_ops = dev->transport->sbc_ops;
 	struct bio *bio = NULL;
-	int type = REQ_WRITE | REQ_DISCARD, i;
+	struct nvme_dsm_range range;
+	int i;
 	u16 status;
 
+	if (!sbc_ops || !sbc_ops->execute_unmap) {
+		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
+		return;
+	}
+
+	ios->se_dev = dev;
+	ios->iomem = NULL;
+	ios->t_comp_func = NULL;
+
 	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
 		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
 				sizeof(range));
 		if (status)
 			break;
 
-		status = nvmet_discard_range(req->ns, &range, type, &bio);
+		status = nvmet_discard_range(req, sbc_ops, &range, &bio);
 		if (status)
 			break;
 	}
 
 	if (bio) {
 		bio->bi_private = req;
-		bio->bi_end_io = nvmet_bio_done;
+		bio->bi_end_io = nvmet_discard_bio_done;
 		if (status) {
 			bio->bi_error = -EIO;
 			bio_endio(bio);
 		} else {
-			submit_bio(type, bio);
+			submit_bio(REQ_WRITE | REQ_DISCARD, bio);
 		}
 	} else {
 		nvmet_req_complete(req, status);
 	}
-#endif
 }
 
 static void nvmet_execute_dsm(struct nvmet_req *req)
-- 
1.9.1


  parent reply	other threads:[~2016-06-07  6:38 UTC|newest]

Thread overview: 11+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2016-06-07  6:36 [RFC 0/8] nvmet: Add support for multi-tenant configfs Nicholas A. Bellinger
2016-06-07  6:36 ` [RFC 1/8] nvmet: Add nvmet_fabric_ops get/put transport helpers Nicholas A. Bellinger
2016-06-07  6:36 ` [RFC 2/8] nvmet: Add support for configfs-ng multi-tenant logic Nicholas A. Bellinger
2016-06-07  6:36 ` [RFC 3/8] nvmet: Hookup nvmet_ns->dev to nvmet_ns_enable Nicholas A. Bellinger
2016-06-07  6:36 ` [RFC 4/8] nvmet/io-cmd: Hookup sbc_ops->execute_rw backend ops Nicholas A. Bellinger
2016-06-07  6:36 ` [RFC 5/8] nvmet/io-cmd: Hookup sbc_ops->execute_sync_cache " Nicholas A. Bellinger
2016-06-07  6:36 ` Nicholas A. Bellinger [this message]
2016-06-07  6:36 ` [RFC 7/8] nvmet/admin-cmd: Hookup T10-PI to ID_NS.ms + ID_NS.dps feature bits Nicholas A. Bellinger
2016-06-09 13:52   ` Christoph Hellwig
2016-06-10  6:55     ` Nicholas A. Bellinger
2016-06-07  6:36 ` [RFC 8/8] nvme/loop: Add support for bio integrity handling Nicholas A. Bellinger

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1465281416-28355-7-git-send-email-nab@linux-iscsi.org \
    --to=nab@linux-iscsi.org \
    --cc=axboe@fb.com \
    --cc=dave.b.minturn@intel.com \
    --cc=hare@suse.de \
    --cc=hch@lst.de \
    --cc=linux-nvme@lists.infradead.org \
    --cc=linux-scsi@vger.kernel.org \
    --cc=martin.petersen@oracle.com \
    --cc=michaelc@cs.wisc.edu \
    --cc=sagi@grimberg.me \
    --cc=target-devel@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).