From: Shai Malin <smalin@marvell.com>
To: <netdev@vger.kernel.org>, <linux-nvme@lists.infradead.org>,
<davem@davemloft.net>, <kuba@kernel.org>, <sagi@grimberg.me>,
<hch@lst.de>, <axboe@fb.com>, <kbusch@kernel.org>
Cc: <aelior@marvell.com>, <mkalderon@marvell.com>,
<okulkarni@marvell.com>, <pkushwaha@marvell.com>,
<malin1024@gmail.com>, <smalin@marvell.com>
Subject: [RFC PATCH v5 08/27] nvme-tcp-offload: Add Timeout and ASYNC Support
Date: Wed, 19 May 2021 14:13:21 +0300
Message-ID: <20210519111340.20613-9-smalin@marvell.com>
In-Reply-To: <20210519111340.20613-1-smalin@marvell.com>

In this patch, we add the nvme-tcp-offload timeout support,
nvme_tcp_ofld_timeout(), and the ASYNC (AER) support,
nvme_tcp_ofld_submit_async_event().

The timeout handler completes a timed-out request immediately when the
controller is not LIVE, so that it cannot block controller teardown or
setup; otherwise it triggers controller error recovery and re-arms the
block layer timer. The ASYNC path builds the AER admin command and
submits it through the vendor driver's send_req() op, with the
completion reported back via the new nvme_tcp_ofld_async_req_done()
callback.
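
As a hedged illustration (not part of the patch; the vendor handler name
below is hypothetical), a vendor offload driver would report the AER
completion back to the common layer through the req->done pointer set up
here:

	/* Sketch: a vendor driver CQE handler for the async (AER) request.
	 * For requests submitted via nvme_tcp_ofld_submit_async_event(),
	 * req->done points to nvme_tcp_ofld_async_req_done().
	 */
	static void example_vendor_async_cqe(struct nvme_tcp_ofld_req *req,
					     struct nvme_completion *cqe)
	{
		req->done(req, &cqe->result, cqe->status);
	}
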
Acked-by: Igor Russkikh <irusskikh@marvell.com>
Signed-off-by: Prabhakar Kushwaha <pkushwaha@marvell.com>
Signed-off-by: Omkar Kulkarni <okulkarni@marvell.com>
Signed-off-by: Michal Kalderon <mkalderon@marvell.com>
Signed-off-by: Ariel Elior <aelior@marvell.com>
Signed-off-by: Shai Malin <smalin@marvell.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
---
drivers/nvme/host/tcp-offload.c | 85 ++++++++++++++++++++++++++++++++-
drivers/nvme/host/tcp-offload.h | 2 +
2 files changed, 86 insertions(+), 1 deletion(-)
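
For reviewers, a minimal sketch of the blk-mq timeout contract the new
.timeout callback relies on (the handler and helper names below are
hypothetical, not part of the patch):

	static enum blk_eh_timer_return example_timeout(struct request *rq,
							bool reserved)
	{
		/* Controller not LIVE: complete rq here (host-aborted) so
		 * it cannot block teardown/setup, and tell blk-mq that the
		 * driver has taken ownership of the request.
		 */
		if (!example_ctrl_is_live(rq))	/* hypothetical helper */
			return BLK_EH_DONE;

		/* Otherwise kick controller error recovery and re-arm the
		 * timer; recovery cancels outstanding requests on teardown.
		 */
		return BLK_EH_RESET_TIMER;
	}
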
diff --git a/drivers/nvme/host/tcp-offload.c b/drivers/nvme/host/tcp-offload.c
index 276b8475ac85..01b4c43cdaa5 100644
--- a/drivers/nvme/host/tcp-offload.c
+++ b/drivers/nvme/host/tcp-offload.c
@@ -133,6 +133,26 @@ void nvme_tcp_ofld_req_done(struct nvme_tcp_ofld_req *req,
 	nvme_complete_rq(rq);
 }
 
+/**
+ * nvme_tcp_ofld_async_req_done() - NVMeTCP Offload request done callback
+ * function for async requests. Pointed to by nvme_tcp_ofld_req->done.
+ * Completes the NVMe asynchronous event from the CQE result and status.
+ * @req: NVMeTCP offload request to complete.
+ * @result: The nvme_result.
+ * @status: The completion status.
+ *
+ * API function that allows the vendor specific offload driver to report request
+ * completions to the common offload layer.
+ */
+void nvme_tcp_ofld_async_req_done(struct nvme_tcp_ofld_req *req,
+				  union nvme_result *result, __le16 status)
+{
+	struct nvme_tcp_ofld_queue *queue = req->queue;
+	struct nvme_tcp_ofld_ctrl *ctrl = queue->ctrl;
+
+	nvme_complete_async_event(&ctrl->nctrl, status, result);
+}
+
 struct nvme_tcp_ofld_dev *
 nvme_tcp_ofld_lookup_dev(struct nvme_tcp_ofld_ctrl *ctrl)
 {
@@ -733,7 +753,23 @@ void nvme_tcp_ofld_map_data(struct nvme_command *c, u32 data_len)
 
 static void nvme_tcp_ofld_submit_async_event(struct nvme_ctrl *arg)
 {
-	/* Placeholder - submit_async_event */
+	struct nvme_tcp_ofld_ctrl *ctrl = to_tcp_ofld_ctrl(arg);
+	struct nvme_tcp_ofld_queue *queue = &ctrl->queues[0];
+	struct nvme_tcp_ofld_dev *dev = queue->dev;
+	struct nvme_tcp_ofld_ops *ops = dev->ops;
+
+	ctrl->async_req.nvme_cmd.common.opcode = nvme_admin_async_event;
+	ctrl->async_req.nvme_cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
+	ctrl->async_req.nvme_cmd.common.flags |= NVME_CMD_SGL_METABUF;
+
+	nvme_tcp_ofld_set_sg_null(&ctrl->async_req.nvme_cmd);
+
+	ctrl->async_req.async = true;
+	ctrl->async_req.queue = queue;
+	ctrl->async_req.last = true;
+	ctrl->async_req.done = nvme_tcp_ofld_async_req_done;
+
+	ops->send_req(&ctrl->async_req);
 }
 
 static void
@@ -1039,6 +1075,51 @@ static int nvme_tcp_ofld_poll(struct blk_mq_hw_ctx *hctx)
 	return ops->poll_queue(queue);
 }
 
+static void nvme_tcp_ofld_complete_timed_out(struct request *rq)
+{
+	struct nvme_tcp_ofld_req *req = blk_mq_rq_to_pdu(rq);
+	struct nvme_ctrl *nctrl = &req->queue->ctrl->nctrl;
+
+	nvme_tcp_ofld_stop_queue(nctrl, nvme_tcp_ofld_qid(req->queue));
+	if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
+		nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
+		blk_mq_complete_request(rq);
+	}
+}
+
+static enum blk_eh_timer_return nvme_tcp_ofld_timeout(struct request *rq, bool reserved)
+{
+	struct nvme_tcp_ofld_req *req = blk_mq_rq_to_pdu(rq);
+	struct nvme_tcp_ofld_ctrl *ctrl = req->queue->ctrl;
+
+	dev_warn(ctrl->nctrl.device,
+		 "queue %d: timeout request %#x type %d\n",
+		 nvme_tcp_ofld_qid(req->queue), rq->tag, req->nvme_cmd.common.opcode);
+
+	if (ctrl->nctrl.state != NVME_CTRL_LIVE) {
+		/*
+		 * If we are resetting, connecting or deleting we should
+		 * complete immediately because we may block controller
+		 * teardown or setup sequence
+		 * - ctrl disable/shutdown fabrics requests
+		 * - connect requests
+		 * - initialization admin requests
+		 * - I/O requests that entered after unquiescing and
+		 *   the controller stopped responding
+		 *
+		 * All other requests should be cancelled by the error
+		 * recovery work, so it's fine that we fail it here.
+		 */
+		nvme_tcp_ofld_complete_timed_out(rq);
+
+		return BLK_EH_DONE;
+	}
+
+	nvme_tcp_ofld_error_recovery(&ctrl->nctrl);
+
+	return BLK_EH_RESET_TIMER;
+}
+
 static struct blk_mq_ops nvme_tcp_ofld_mq_ops = {
 	.queue_rq = nvme_tcp_ofld_queue_rq,
 	.commit_rqs = nvme_tcp_ofld_commit_rqs,
@@ -1046,6 +1127,7 @@ static struct blk_mq_ops nvme_tcp_ofld_mq_ops = {
 	.init_request = nvme_tcp_ofld_init_request,
 	.exit_request = nvme_tcp_ofld_exit_request,
 	.init_hctx = nvme_tcp_ofld_init_hctx,
+	.timeout = nvme_tcp_ofld_timeout,
 	.map_queues = nvme_tcp_ofld_map_queues,
 	.poll = nvme_tcp_ofld_poll,
 };
@@ -1056,6 +1138,7 @@ static struct blk_mq_ops nvme_tcp_ofld_admin_mq_ops = {
 	.init_request = nvme_tcp_ofld_init_request,
 	.exit_request = nvme_tcp_ofld_exit_request,
 	.init_hctx = nvme_tcp_ofld_init_admin_hctx,
+	.timeout = nvme_tcp_ofld_timeout,
 };
 
 static const struct nvme_ctrl_ops nvme_tcp_ofld_ctrl_ops = {
diff --git a/drivers/nvme/host/tcp-offload.h b/drivers/nvme/host/tcp-offload.h
index 2233d855aa10..f897b811c399 100644
--- a/drivers/nvme/host/tcp-offload.h
+++ b/drivers/nvme/host/tcp-offload.h
@@ -117,6 +117,8 @@ struct nvme_tcp_ofld_ctrl {
 	/* Connectivity params */
 	struct nvme_tcp_ofld_ctrl_con_params conn_params;
 
+	struct nvme_tcp_ofld_req async_req;
+
 	/* Vendor specific driver context */
 	void *private_data;
 };
--
2.22.0