From: Boris Pismenny <borisp@nvidia.com>
To: <dsahern@gmail.com>, <kuba@kernel.org>, <davem@davemloft.net>,
<saeedm@nvidia.com>, <hch@lst.de>, <sagi@grimberg.me>,
<axboe@fb.com>, <kbusch@kernel.org>, <viro@zeniv.linux.org.uk>,
<edumazet@google.com>, <smalin@marvell.com>
Cc: <boris.pismenny@gmail.com>, <linux-nvme@lists.infradead.org>,
<netdev@vger.kernel.org>, <benishay@nvidia.com>,
<ogerlitz@nvidia.com>, <yorayz@nvidia.com>,
Boris Pismenny <borisp@mellanox.com>,
Ben Ben-Ishay <benishay@mellanox.com>,
Or Gerlitz <ogerlitz@mellanox.com>,
Yoray Zack <yorayz@mellanox.com>
Subject: [PATCH v5 net-next 06/36] nvme-tcp: Add DDP data-path
Date: Thu, 22 Jul 2021 14:02:55 +0300
Message-ID: <20210722110325.371-7-borisp@nvidia.com>
In-Reply-To: <20210722110325.371-1-borisp@nvidia.com>
From: Boris Pismenny <borisp@mellanox.com>

Introduce the NVMe-TCP DDP data-path offload.

Using this interface, the NIC hardware scatters the TCP payload directly
into the BIO pages, matching packets to requests by the command_id
carried in the PDU. To maintain the correctness of the network stack,
the NIC driver is expected to construct SKBs that point to the BIO
pages.
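
For illustration only, the driver side might attach the pre-placed
pages roughly like this (a hypothetical sketch, not part of this patch:
skb_add_rx_frag() is the existing kernel helper, while the function
name is made up and page refcounting is elided):

	/*
	 * Hypothetical NIC-driver RX sketch: the payload has already
	 * been DMA-ed into the BIO page, so the SKB merely references
	 * it instead of carrying a private copy.
	 */
	static void nic_ddp_attach_page(struct sk_buff *skb,
					struct page *bio_page,
					unsigned int off, unsigned int len)
	{
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				bio_page, off, len, PAGE_SIZE);
	}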

The data-path interface consists of two routines,
ulp_ddp_setup/teardown: setup installs the mapping from a command_id to
the request buffers, while teardown removes this mapping.
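
From the caller's point of view, the pairing looks roughly as follows
(a condensed sketch of the code added below; SG-table allocation and
error handling elided):

	/* setup: install the command_id -> request-buffer mapping */
	req->ddp.command_id = command_id;
	req->ddp.sg_table.sgl = req->ddp.first_sgl;
	if (!netdev->ulp_ddp_ops->ulp_ddp_setup(netdev, queue->sock->sk,
						&req->ddp))
		req->offloaded = true;

	/* teardown: remove the mapping once the request completes */
	netdev->ulp_ddp_ops->ulp_ddp_teardown(netdev, queue->sock->sk,
					      &req->ddp, rq);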

For efficiency, we introduce an asynchronous NVMe completion, which is
split between NVMe-TCP and the NIC driver as follows: NVMe-TCP performs
the NVMe-specific completion, while the NIC driver performs the generic
blk_mq completion.
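
Schematically (a sketch of the flow implemented below; how the NIC
driver dispatches the callback is left to the device-driver patches
later in the series):

	/*
	 * NVMe-TCP: stash the NVMe completion and start the async
	 * hardware teardown instead of completing inline.
	 */
	req->status = cqe->status;
	req->result = cqe->result;
	nvme_tcp_teardown_ddp(queue, command_id, rq);

	/* NIC driver, later, once the HW mapping is gone: */
	ulp_ops->ddp_teardown_done(rq);	/* -> nvme_tcp_ddp_teardown_done(),
					 * which runs the blk_mq completion
					 */
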
Signed-off-by: Boris Pismenny <borisp@mellanox.com>
Signed-off-by: Ben Ben-Ishay <benishay@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Yoray Zack <yorayz@mellanox.com>
---
drivers/nvme/host/tcp.c | 150 ++++++++++++++++++++++++++++++++++++----
1 file changed, 138 insertions(+), 12 deletions(-)
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index f1a5520cabec..34982fb0c655 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -57,6 +57,11 @@ struct nvme_tcp_request {
size_t offset;
size_t data_sent;
enum nvme_tcp_send_state state;
+
+ bool offloaded;
+ struct ulp_ddp_io ddp;
+ __le16 status;
+ union nvme_result result;
};
enum nvme_tcp_queue_flags {
@@ -225,13 +230,76 @@ static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
return nvme_tcp_pdu_data_left(req) <= len;
}
+static int nvme_tcp_req_map_sg(struct nvme_tcp_request *req, struct request *rq)
+{
+ int ret;
+
+ req->ddp.sg_table.sgl = req->ddp.first_sgl;
+ ret = sg_alloc_table_chained(&req->ddp.sg_table, blk_rq_nr_phys_segments(rq),
+ req->ddp.sg_table.sgl, SG_CHUNK_SIZE);
+ if (ret)
+ return -ENOMEM;
+ req->ddp.nents = blk_rq_map_sg(rq->q, rq, req->ddp.sg_table.sgl);
+ return 0;
+}
+
#ifdef CONFIG_ULP_DDP
static bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags);
+static void nvme_tcp_ddp_teardown_done(void *ddp_ctx);
static const struct ulp_ddp_ulp_ops nvme_tcp_ddp_ulp_ops = {
.resync_request = nvme_tcp_resync_request,
+ .ddp_teardown_done = nvme_tcp_ddp_teardown_done,
};
+static int nvme_tcp_teardown_ddp(struct nvme_tcp_queue *queue,
+ u16 command_id,
+ struct request *rq)
+{
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+ struct net_device *netdev = queue->ctrl->offloading_netdev;
+ int ret;
+
+ ret = netdev->ulp_ddp_ops->ulp_ddp_teardown(netdev, queue->sock->sk,
+ &req->ddp, rq);
+ sg_free_table_chained(&req->ddp.sg_table, SG_CHUNK_SIZE);
+ return ret;
+}
+
+static void nvme_tcp_ddp_teardown_done(void *ddp_ctx)
+{
+ struct request *rq = ddp_ctx;
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+
+ if (!nvme_try_complete_req(rq, req->status, req->result))
+ nvme_complete_rq(rq);
+}
+
+static int nvme_tcp_setup_ddp(struct nvme_tcp_queue *queue,
+ u16 command_id,
+ struct request *rq)
+{
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+ struct net_device *netdev = queue->ctrl->offloading_netdev;
+ int ret;
+
+ if (!test_bit(NVME_TCP_Q_OFF_DDP, &queue->flags) ||
+ !blk_rq_nr_phys_segments(rq) || rq_data_dir(rq) != READ)
+ return -EINVAL;
+
+ req->ddp.command_id = command_id;
+ ret = nvme_tcp_req_map_sg(req, rq);
+ if (ret)
+ return -ENOMEM;
+
+ ret = netdev->ulp_ddp_ops->ulp_ddp_setup(netdev,
+ queue->sock->sk,
+ &req->ddp);
+ if (!ret)
+ req->offloaded = true;
+ return ret;
+}
+
static int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
{
struct net_device *netdev = queue->ctrl->offloading_netdev;
@@ -342,7 +410,7 @@ static void nvme_tcp_resync_response(struct nvme_tcp_queue *queue,
return;
if (unlikely(!netdev)) {
- pr_info_ratelimited("%s: netdev not found\n", __func__);
+ dev_info_ratelimited(queue->ctrl->ctrl.device, "netdev not found\n");
return;
}
@@ -367,6 +435,20 @@ static bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags)
#else
+static int nvme_tcp_setup_ddp(struct nvme_tcp_queue *queue,
+ u16 command_id,
+ struct request *rq)
+{
+ return -EINVAL;
+}
+
+static int nvme_tcp_teardown_ddp(struct nvme_tcp_queue *queue,
+ u16 command_id,
+ struct request *rq)
+{
+ return -EINVAL;
+}
+
static int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
{
return -EINVAL;
@@ -650,6 +732,24 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
}
+static void nvme_tcp_complete_request(struct request *rq,
+ __le16 status,
+ union nvme_result result,
+ __u16 command_id)
+{
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_tcp_queue *queue = req->queue;
+
+ if (req->offloaded) {
+ req->status = status;
+ req->result = result;
+ nvme_tcp_teardown_ddp(queue, command_id, rq);
+ } else {
+ if (!nvme_try_complete_req(rq, status, result))
+ nvme_complete_rq(rq);
+ }
+}
+
static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
struct nvme_completion *cqe)
{
@@ -664,10 +764,8 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
return -EINVAL;
}
- if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
- nvme_complete_rq(rq);
+ nvme_tcp_complete_request(rq, cqe->status, cqe->result, cqe->command_id);
queue->nr_cqe++;
-
return 0;
}
@@ -863,10 +961,38 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_tcp_queue *queue = req->queue;
+ struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
union nvme_result res = {};
- if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
- nvme_complete_rq(rq);
+ nvme_tcp_complete_request(rq, cpu_to_le16(status << 1), res, pdu->command_id);
+}
+
+static int nvme_tcp_consume_skb(struct nvme_tcp_queue *queue, struct sk_buff *skb,
+ unsigned int *offset, struct iov_iter *iter, int recv_len)
+{
+ int ret;
+
+#ifdef CONFIG_ULP_DDP
+ if (test_bit(NVME_TCP_Q_OFF_DDP, &queue->flags)) {
+ if (queue->data_digest)
+ ret = skb_ddp_copy_and_hash_datagram_iter(skb, *offset, iter, recv_len,
+ queue->rcv_hash);
+ else
+ ret = skb_ddp_copy_datagram_iter(skb, *offset, iter, recv_len);
+ } else {
+#endif
+ if (queue->data_digest)
+ ret = skb_copy_and_hash_datagram_iter(skb, *offset, iter, recv_len,
+ queue->rcv_hash);
+ else
+ ret = skb_copy_datagram_iter(skb, *offset, iter, recv_len);
+#ifdef CONFIG_ULP_DDP
+ }
+#endif
+
+ return ret;
}
static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
@@ -913,12 +1040,7 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
recv_len = min_t(size_t, recv_len,
iov_iter_count(&req->iter));
- if (queue->data_digest)
- ret = skb_copy_and_hash_datagram_iter(skb, *offset,
- &req->iter, recv_len, queue->rcv_hash);
- else
- ret = skb_copy_datagram_iter(skb, *offset,
- &req->iter, recv_len);
+ ret = nvme_tcp_consume_skb(queue, skb, offset, &req->iter, recv_len);
if (ret) {
dev_err(queue->ctrl->ctrl.device,
"queue %d failed to copy request %#x data",
@@ -1142,6 +1264,7 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
bool inline_data = nvme_tcp_has_inline_data(req);
u8 hdgst = nvme_tcp_hdgst_len(queue);
int len = sizeof(*pdu) + hdgst - req->offset;
+ struct request *rq = blk_mq_rq_from_pdu(req);
int flags = MSG_DONTWAIT;
int ret;
@@ -1150,6 +1273,8 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
else
flags |= MSG_EOR;
+ nvme_tcp_setup_ddp(queue, pdu->cmd.common.command_id, rq);
+
if (queue->hdr_digest && !req->offset)
nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
@@ -2486,6 +2611,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
if (req->curr_bio && req->data_len)
nvme_tcp_init_iter(req, rq_data_dir(rq));
+ req->offloaded = false;
if (rq_data_dir(rq) == WRITE &&
req->data_len <= nvme_tcp_inline_data_size(queue))
req->pdu_len = req->data_len;
--
2.24.1