From: James Smart <jsmart2021@gmail.com>
To: linux-nvme@lists.infradead.org
Cc: James Smart <jsmart2021@gmail.com>
Subject: [PATCH v2 16/26] nvme-fcloop: add target to host LS request support
Date: Tue, 31 Mar 2020 09:50:01 -0700 [thread overview]
Message-ID: <20200331165011.15819-17-jsmart2021@gmail.com> (raw)
In-Reply-To: <20200331165011.15819-1-jsmart2021@gmail.com>
Add support for performing LS requests from the target to the host.
This includes sending the request from the targetport, receiving it on
the host, and the host transmitting the LS response.
Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
---
drivers/nvme/target/fcloop.c | 130 +++++++++++++++++++++++++++++++++++++++----
1 file changed, 118 insertions(+), 12 deletions(-)
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 9a3ce0cc31db..809f11c5d560 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -208,10 +208,13 @@ struct fcloop_rport {
};
struct fcloop_tport {
- struct nvmet_fc_target_port *targetport;
- struct nvme_fc_remote_port *remoteport;
- struct fcloop_nport *nport;
- struct fcloop_lport *lport;
+ struct nvmet_fc_target_port *targetport;
+ struct nvme_fc_remote_port *remoteport;
+ struct fcloop_nport *nport;
+ struct fcloop_lport *lport;
+ spinlock_t lock;
+ struct list_head ls_list;
+ struct work_struct ls_work;
};
struct fcloop_nport {
@@ -226,11 +229,6 @@ struct fcloop_nport {
u32 port_id;
};
-enum {
- H2T = 0,
- T2H = 1,
-};
-
struct fcloop_lsreq {
struct nvmefc_ls_req *lsreq;
struct nvmefc_ls_rsp ls_rsp;
@@ -337,7 +335,6 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
struct fcloop_rport *rport = remoteport->private;
int ret = 0;
- tls_req->lsdir = H2T;
tls_req->lsreq = lsreq;
INIT_LIST_HEAD(&tls_req->ls_list);
@@ -351,8 +348,9 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
}
tls_req->status = 0;
- ret = nvmet_fc_rcv_ls_req(rport->targetport, NULL, &tls_req->ls_rsp,
- lsreq->rqstaddr, lsreq->rqstlen);
+ ret = nvmet_fc_rcv_ls_req(rport->targetport, rport,
+ &tls_req->ls_rsp,
+ lsreq->rqstaddr, lsreq->rqstlen);
return ret;
}
@@ -384,6 +382,99 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
return 0;
}
+static void
+fcloop_tport_lsrqst_work(struct work_struct *work)
+{
+ struct fcloop_tport *tport =
+ container_of(work, struct fcloop_tport, ls_work);
+ struct fcloop_lsreq *tls_req;
+
+ spin_lock(&tport->lock);
+ for (;;) {
+ tls_req = list_first_entry_or_null(&tport->ls_list,
+ struct fcloop_lsreq, ls_list);
+ if (!tls_req)
+ break;
+
+ list_del(&tls_req->ls_list);
+ spin_unlock(&tport->lock);
+
+ tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
+ /*
+ * callee may free memory containing tls_req.
+ * do not reference lsreq after this.
+ */
+
+ spin_lock(&tport->lock);
+ }
+ spin_unlock(&tport->lock);
+}
+
+static int
+fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
+ struct nvmefc_ls_req *lsreq)
+{
+ struct fcloop_lsreq *tls_req = lsreq->private;
+ struct fcloop_tport *tport = targetport->private;
+ int ret = 0;
+
+ /*
+ * hosthandle should be the dst.rport value.
+ * hosthandle ignored as fcloop currently is
+ * 1:1 tgtport vs remoteport
+ */
+ tls_req->lsreq = lsreq;
+ INIT_LIST_HEAD(&tls_req->ls_list);
+
+ if (!tport->remoteport) {
+ tls_req->status = -ECONNREFUSED;
+ spin_lock(&tport->lock);
+ list_add_tail(&tls_req->ls_list, &tport->ls_list);
+ spin_unlock(&tport->lock);
+ schedule_work(&tport->ls_work);
+ return ret;
+ }
+
+ tls_req->status = 0;
+ ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
+ lsreq->rqstaddr, lsreq->rqstlen);
+
+ return ret;
+}
+
+static int
+fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
+ struct nvme_fc_remote_port *remoteport,
+ struct nvmefc_ls_rsp *lsrsp)
+{
+ struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
+ struct nvmefc_ls_req *lsreq = tls_req->lsreq;
+ struct fcloop_rport *rport = remoteport->private;
+ struct nvmet_fc_target_port *targetport = rport->targetport;
+ struct fcloop_tport *tport;
+
+ memcpy(lsreq->rspaddr, lsrsp->rspbuf,
+ ((lsreq->rsplen < lsrsp->rsplen) ?
+ lsreq->rsplen : lsrsp->rsplen));
+ lsrsp->done(lsrsp);
+
+ if (targetport) {
+ tport = targetport->private;
+ spin_lock(&tport->lock);
+ list_add_tail(&tls_req->ls_list, &tport->ls_list);
+ spin_unlock(&tport->lock);
+ schedule_work(&tport->ls_work);
+ }
+
+ return 0;
+}
+
+static void
+fcloop_t2h_host_release(void *hosthandle)
+{
+ /* host handle ignored for now */
+}
+
/*
* Simulate reception of RSCN and converting it to a initiator transport
* call to rescan a remote port.
@@ -775,6 +866,12 @@ fcloop_h2t_ls_abort(struct nvme_fc_local_port *localport,
{
}
+static void
+fcloop_t2h_ls_abort(struct nvmet_fc_target_port *targetport,
+ void *hosthandle, struct nvmefc_ls_req *lsreq)
+{
+}
+
static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
struct nvme_fc_remote_port *remoteport,
@@ -874,6 +971,7 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
struct fcloop_tport *tport = targetport->private;
+ flush_work(&tport->ls_work);
fcloop_nport_put(tport->nport);
}
@@ -891,6 +989,7 @@ static struct nvme_fc_port_template fctemplate = {
.fcp_io = fcloop_fcp_req,
.ls_abort = fcloop_h2t_ls_abort,
.fcp_abort = fcloop_fcp_abort,
+ .xmt_ls_rsp = fcloop_t2h_xmt_ls_rsp,
.max_hw_queues = FCLOOP_HW_QUEUES,
.max_sgl_segments = FCLOOP_SGL_SEGS,
.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
@@ -909,6 +1008,9 @@ static struct nvmet_fc_target_template tgttemplate = {
.fcp_abort = fcloop_tgt_fcp_abort,
.fcp_req_release = fcloop_fcp_req_release,
.discovery_event = fcloop_tgt_discovery_evt,
+ .ls_req = fcloop_t2h_ls_req,
+ .ls_abort = fcloop_t2h_ls_abort,
+ .host_release = fcloop_t2h_host_release,
.max_hw_queues = FCLOOP_HW_QUEUES,
.max_sgl_segments = FCLOOP_SGL_SEGS,
.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
@@ -917,6 +1019,7 @@ static struct nvmet_fc_target_template tgttemplate = {
.target_features = 0,
/* sizes of additional private data for data structures */
.target_priv_sz = sizeof(struct fcloop_tport),
+ .lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
};
static ssize_t
@@ -1266,6 +1369,9 @@ fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
tport->nport = nport;
tport->lport = nport->lport;
nport->tport = tport;
+ spin_lock_init(&tport->lock);
+ INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work);
+ INIT_LIST_HEAD(&tport->ls_list);
return count;
}
--
2.16.4
_______________________________________________
linux-nvme mailing list
linux-nvme@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-nvme
next prev parent reply other threads:[~2020-03-31 16:54 UTC|newest]
Thread overview: 28+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-03-31 16:49 [PATCH v2 00/26] nvme-fc/nvmet-fc: Add FC-NVME-2 disconnect association support James Smart
2020-03-31 16:49 ` [PATCH v2 01/26] nvme-fc: Sync header to FC-NVME-2 rev 1.08 James Smart
2020-03-31 16:49 ` [PATCH v2 02/26] nvme-fc and nvmet-fc: revise LLDD api for LS reception and LS request James Smart
2020-03-31 16:49 ` [PATCH v2 03/26] nvme-fc nvmet-fc: refactor for common LS definitions James Smart
2020-03-31 16:49 ` [PATCH v2 04/26] nvmet-fc: Better size LS buffers James Smart
2020-03-31 16:49 ` [PATCH v2 05/26] nvme-fc: Ensure private pointers are NULL if no data James Smart
2020-03-31 16:49 ` [PATCH v2 06/26] nvme-fc: convert assoc_active flag to bit op James Smart
2020-03-31 16:49 ` [PATCH v2 07/26] nvme-fc: Update header and host for common definitions for LS handling James Smart
2020-03-31 16:49 ` [PATCH v2 08/26] nvmet-fc: Update target " James Smart
2020-03-31 16:49 ` [PATCH v2 09/26] nvme-fc: Add Disconnect Association Rcv support James Smart
2020-03-31 16:49 ` [PATCH v2 10/26] nvmet-fc: add LS failure messages James Smart
2020-03-31 16:49 ` [PATCH v2 11/26] nvmet-fc: perform small cleanups on unneeded checks James Smart
2020-03-31 16:49 ` [PATCH v2 12/26] nvmet-fc: track hostport handle for associations James Smart
2020-03-31 16:49 ` [PATCH v2 13/26] nvmet-fc: rename ls_list to ls_rcv_list James Smart
2020-03-31 16:49 ` [PATCH v2 14/26] nvmet-fc: Add Disconnect Association Xmt support James Smart
2020-03-31 16:50 ` [PATCH v2 15/26] nvme-fcloop: refactor to enable target to host LS James Smart
2020-03-31 16:50 ` James Smart [this message]
2020-03-31 16:50 ` [PATCH v2 17/26] lpfc: Refactor lpfc nvme headers James Smart
2020-03-31 16:50 ` [PATCH v2 18/26] lpfc: Refactor nvmet_rcv_ctx to create lpfc_async_xchg_ctx James Smart
2020-03-31 16:50 ` [PATCH v2 19/26] lpfc: Commonize lpfc_async_xchg_ctx state and flag definitions James Smart
2020-03-31 16:50 ` [PATCH v2 20/26] lpfc: Refactor NVME LS receive handling James Smart
2020-03-31 16:50 ` [PATCH v2 21/26] lpfc: Refactor Send LS Request support James Smart
2020-03-31 16:50 ` [PATCH v2 22/26] lpfc: Refactor Send LS Abort support James Smart
2020-03-31 16:50 ` [PATCH v2 23/26] lpfc: Refactor Send LS Response support James Smart
2020-03-31 16:50 ` [PATCH v2 24/26] lpfc: nvme: Add Receive LS Request and Send LS Response support to nvme James Smart
2020-03-31 16:50 ` [PATCH v2 25/26] lpfc: nvmet: Add support for NVME LS request hosthandle James Smart
2020-03-31 16:50 ` [PATCH v2 26/26] lpfc: nvmet: Add Send LS Request and Abort LS Request support James Smart
2020-04-01 8:27 ` [PATCH v2 00/26] nvme-fc/nvmet-fc: Add FC-NVME-2 disconnect association support Christoph Hellwig
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200331165011.15819-17-jsmart2021@gmail.com \
--to=jsmart2021@gmail.com \
--cc=linux-nvme@lists.infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox