linux-rdma.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] nvmet-rdma: Fix missing dma sync to nvme data structures
@ 2017-01-14 16:45 Parav Pandit
       [not found] ` <1484412337-10860-1-git-send-email-parav-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
  0 siblings, 1 reply; 18+ messages in thread
From: Parav Pandit @ 2017-01-14 16:45 UTC (permalink / raw)
  To: hch-jcswGhMUV9g, sagi-NQWnxTmZq1alnMjI0IkVqw,
	linux-nvme-IAPFreCvJWM7uuMidbF8XUB+6BGkLq7r,
	linux-rdma-u79uwXL29TY76Z2rM5mHXA,
	dledford-H+wXaHxf7aLQT0dZR+AlfA
  Cc: parav-VPRAkNaXOzVWk0Htik3J/w

This patch performs dma sync operations on nvme_command,
inline page(s) and nvme_completion.

nvme_command and write cmd inline data is synced
(a) on receiving of the recv queue completion for cpu access.
(b) before posting recv wqe back to rdma adapter for device access.

nvme_completion is synced
(a) on receiving send completion for nvme_completion for cpu access.
(b) before posting send wqe to rdma adapter for device access.

Signed-off-by: Parav Pandit <parav-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
Reviewed-by: Max Gurtovoy <maxg-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
---
 drivers/nvme/target/rdma.c | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 6c1c368..da3d553 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -438,6 +438,14 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
 {
 	struct ib_recv_wr *bad_wr;
 
+	ib_dma_sync_single_for_device(ndev->device,
+			cmd->sge[0].addr, sizeof(*cmd->nvme_cmd),
+			DMA_FROM_DEVICE);
+
+	if (cmd->sge[1].addr)
+		ib_dma_sync_single_for_device(ndev->device,
+				cmd->sge[1].addr, NVMET_RDMA_INLINE_DATA_SIZE,
+				DMA_FROM_DEVICE);
 	if (ndev->srq)
 		return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
 	return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
@@ -507,6 +515,10 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
 	struct nvmet_rdma_rsp *rsp =
 		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
 
+	ib_dma_sync_single_for_cpu(rsp->queue->dev->device,
+			rsp->send_sge.addr, sizeof(*rsp->req.rsp),
+			DMA_TO_DEVICE);
+
 	nvmet_rdma_release_rsp(rsp);
 
 	if (unlikely(wc->status != IB_WC_SUCCESS &&
@@ -538,6 +550,11 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
 		first_wr = &rsp->send_wr;
 
 	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
+
+	ib_dma_sync_single_for_device(rsp->queue->dev->device,
+			rsp->send_sge.addr, sizeof(*rsp->req.rsp),
+			DMA_TO_DEVICE);
+
 	if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
 		pr_err("sending cmd response failed\n");
 		nvmet_rdma_release_rsp(rsp);
@@ -698,6 +715,15 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
 	cmd->n_rdma = 0;
 	cmd->req.port = queue->port;
 
+	ib_dma_sync_single_for_cpu(queue->dev->device, cmd->cmd->sge[0].addr,
+			sizeof(*cmd->cmd->nvme_cmd), DMA_FROM_DEVICE);
+
+	if (cmd->cmd->sge[1].addr)
+		ib_dma_sync_single_for_cpu(queue->dev->device,
+				cmd->cmd->sge[1].addr,
+				NVMET_RDMA_INLINE_DATA_SIZE,
+				DMA_FROM_DEVICE);
+
 	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
 			&queue->nvme_sq, &nvmet_rdma_ops))
 		return;
-- 
1.8.3.1

--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply related	[flat|nested] 18+ messages in thread
* [PATCH] nvmet-rdma: Fix missing dma sync to nvme data structures
@ 2017-01-12 22:45 Parav Pandit
       [not found] ` <1484261109-3316-1-git-send-email-parav-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
  0 siblings, 1 reply; 18+ messages in thread
From: Parav Pandit @ 2017-01-12 22:45 UTC (permalink / raw)
  To: hch-jcswGhMUV9g, sagi-NQWnxTmZq1alnMjI0IkVqw,
	linux-nvme-IAPFreCvJWM7uuMidbF8XUB+6BGkLq7r,
	linux-rdma-u79uwXL29TY76Z2rM5mHXA,
	dledford-H+wXaHxf7aLQT0dZR+AlfA
  Cc: parav-VPRAkNaXOzVWk0Htik3J/w

This patch performs dma sync operations on nvme_command,
inline page(s) and nvme_completion.

nvme_command and write cmd inline data is synced
(a) on receiving of the recv queue completion for cpu access.
(b) before posting recv wqe back to rdma adapter for device access.

nvme_completion is synced
(a) on receiving send completion for nvme_completion for cpu access.
(b) before posting send wqe to rdma adapter for device access.

Pushing this patch through the linux-rdma tree as it's more relevant to
Bart's changes for dma_map_ops of [1].

[1] https://patchwork.kernel.org/patch/9514085/

Signed-off-by: Parav Pandit <parav-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
Reviewed-by: Max Gurtovoy <maxg-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
---
 drivers/nvme/target/rdma.c | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 8c3760a..c6468b3 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -438,6 +438,14 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
 {
 	struct ib_recv_wr *bad_wr;
 
+	dma_sync_single_for_device(ndev->device->dma_device,
+			cmd->sge[0].addr, sizeof(*cmd->nvme_cmd),
+			DMA_FROM_DEVICE);
+
+	if (cmd->sge[1].addr)
+		dma_sync_single_for_device(ndev->device->dma_device,
+				cmd->sge[1].addr, NVMET_RDMA_INLINE_DATA_SIZE,
+				DMA_FROM_DEVICE);
 	if (ndev->srq)
 		return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
 	return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
@@ -507,6 +515,10 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
 	struct nvmet_rdma_rsp *rsp =
 		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
 
+	dma_sync_single_for_cpu(rsp->queue->dev->device->dma_device,
+			rsp->send_sge.addr, sizeof(*rsp->req.rsp),
+			DMA_TO_DEVICE);
+
 	nvmet_rdma_release_rsp(rsp);
 
 	if (unlikely(wc->status != IB_WC_SUCCESS &&
@@ -538,6 +550,11 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
 		first_wr = &rsp->send_wr;
 
 	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
+
+	dma_sync_single_for_device(rsp->queue->dev->device->dma_device,
+			rsp->send_sge.addr, sizeof(*rsp->req.rsp),
+			DMA_TO_DEVICE);
+
 	if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
 		pr_err("sending cmd response failed\n");
 		nvmet_rdma_release_rsp(rsp);
@@ -698,6 +715,16 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
 	cmd->n_rdma = 0;
 	cmd->req.port = queue->port;
 
+	dma_sync_single_for_cpu(queue->dev->device->dma_device,
+			cmd->cmd->sge[0].addr, sizeof(*cmd->cmd->nvme_cmd),
+			DMA_FROM_DEVICE);
+
+	if (cmd->cmd->sge[1].addr)
+		dma_sync_single_for_cpu(queue->dev->device->dma_device,
+				cmd->cmd->sge[1].addr,
+				NVMET_RDMA_INLINE_DATA_SIZE,
+				DMA_FROM_DEVICE);
+
 	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
 			&queue->nvme_sq, &nvmet_rdma_ops))
 		return;
-- 
1.8.3.1

--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply related	[flat|nested] 18+ messages in thread
* [PATCH] nvmet-rdma: Fix missing dma sync to nvme data structures
@ 2017-01-12 22:42 Parav Pandit
       [not found] ` <1484260948-3227-1-git-send-email-parav-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
  0 siblings, 1 reply; 18+ messages in thread
From: Parav Pandit @ 2017-01-12 22:42 UTC (permalink / raw)
  To: hch-jcswGhMUV9g, sagi-NQWnxTmZq1alnMjI0IkVqw,
	linux-nvme-IAPFreCvJWM7uuMidbF8XUB+6BGkLq7r,
	linux-rdma-u79uwXL29TY76Z2rM5mHXA,
	dledford-H+wXaHxf7aLQT0dZR+AlfA
  Cc: parav-VPRAkNaXOzVWk0Htik3J/w

This patch performs dma sync operations on nvme_command,
inline page(s) and nvme_completion.

nvme_command and write cmd inline data is synced
(a) on receiving of the recv queue completion for cpu access.
(b) before posting recv wqe back to rdma adapter for device access.

nvme_completion is synced
(a) on receiving send completion for nvme_completion for cpu access.
(b) before posting send wqe to rdma adapter for device access.

Pushing this patch through the linux-rdma git tree as it's more relevant to
Bart's changes for dma_map_ops of [1].

[1] https://patchwork.kernel.org/patch/9514085/

Signed-off-by: Parav Pandit <parav-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
---
 drivers/nvme/target/rdma.c | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 8c3760a..c6468b3 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -438,6 +438,14 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
 {
 	struct ib_recv_wr *bad_wr;
 
+	dma_sync_single_for_device(ndev->device->dma_device,
+			cmd->sge[0].addr, sizeof(*cmd->nvme_cmd),
+			DMA_FROM_DEVICE);
+
+	if (cmd->sge[1].addr)
+		dma_sync_single_for_device(ndev->device->dma_device,
+				cmd->sge[1].addr, NVMET_RDMA_INLINE_DATA_SIZE,
+				DMA_FROM_DEVICE);
 	if (ndev->srq)
 		return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
 	return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
@@ -507,6 +515,10 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
 	struct nvmet_rdma_rsp *rsp =
 		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
 
+	dma_sync_single_for_cpu(rsp->queue->dev->device->dma_device,
+			rsp->send_sge.addr, sizeof(*rsp->req.rsp),
+			DMA_TO_DEVICE);
+
 	nvmet_rdma_release_rsp(rsp);
 
 	if (unlikely(wc->status != IB_WC_SUCCESS &&
@@ -538,6 +550,11 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
 		first_wr = &rsp->send_wr;
 
 	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
+
+	dma_sync_single_for_device(rsp->queue->dev->device->dma_device,
+			rsp->send_sge.addr, sizeof(*rsp->req.rsp),
+			DMA_TO_DEVICE);
+
 	if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
 		pr_err("sending cmd response failed\n");
 		nvmet_rdma_release_rsp(rsp);
@@ -698,6 +715,16 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
 	cmd->n_rdma = 0;
 	cmd->req.port = queue->port;
 
+	dma_sync_single_for_cpu(queue->dev->device->dma_device,
+			cmd->cmd->sge[0].addr, sizeof(*cmd->cmd->nvme_cmd),
+			DMA_FROM_DEVICE);
+
+	if (cmd->cmd->sge[1].addr)
+		dma_sync_single_for_cpu(queue->dev->device->dma_device,
+				cmd->cmd->sge[1].addr,
+				NVMET_RDMA_INLINE_DATA_SIZE,
+				DMA_FROM_DEVICE);
+
 	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
 			&queue->nvme_sq, &nvmet_rdma_ops))
 		return;
-- 
1.8.3.1

--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply related	[flat|nested] 18+ messages in thread

end of thread, other threads:[~2017-01-16 17:30 UTC | newest]

Thread overview: 18+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2017-01-14 16:45 [PATCH] nvmet-rdma: Fix missing dma sync to nvme data structures Parav Pandit
     [not found] ` <1484412337-10860-1-git-send-email-parav-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
2017-01-14 21:07   ` Sagi Grimberg
2017-01-16 15:31   ` Christoph Hellwig
     [not found]     ` <20170116153109.GA16170-jcswGhMUV9g@public.gmane.org>
2017-01-16 17:18       ` Parav Pandit
     [not found]         ` <VI1PR0502MB3008EBE381C87D629A784279D17D0-o1MPJYiShExKsLr+rGaxW8DSnupUy6xnnBOFsp37pqbUKgpGm//BTAC/G2K4zDHf@public.gmane.org>
2017-01-16 17:30           ` Christoph Hellwig
  -- strict thread matches above, loose matches on Subject: below --
2017-01-12 22:45 Parav Pandit
     [not found] ` <1484261109-3316-1-git-send-email-parav-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
2017-01-13  7:44   ` Christoph Hellwig
     [not found]     ` <20170113074420.GA25814-jcswGhMUV9g@public.gmane.org>
2017-01-13 15:08       ` Parav Pandit
     [not found]         ` <VI1PR0502MB30086F641A07262801C02A28D1780-o1MPJYiShExKsLr+rGaxW8DSnupUy6xnnBOFsp37pqbUKgpGm//BTAC/G2K4zDHf@public.gmane.org>
2017-01-13 19:42           ` Sagi Grimberg
     [not found]             ` <1f8d3db6-03b7-6c8f-8010-4ab964a87f3c-NQWnxTmZq1alnMjI0IkVqw@public.gmane.org>
2017-01-13 19:50               ` Parav Pandit
     [not found]                 ` <CAG53R5XGW-up2JtB_X29nPwbEGW14mvRBjnao8yOHczYUvvPgg-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2017-01-13 20:01                   ` Sagi Grimberg
     [not found]                     ` <45c8815c-d9a1-4a74-8a70-0a0ea11d8201-NQWnxTmZq1alnMjI0IkVqw@public.gmane.org>
2017-01-14 16:12                       ` Parav Pandit
     [not found]                         ` <VI1PR0502MB3008E00F1FC094A35496C6DBD17B0-o1MPJYiShExKsLr+rGaxW8DSnupUy6xnnBOFsp37pqbUKgpGm//BTAC/G2K4zDHf@public.gmane.org>
2017-01-14 21:00                           ` Sagi Grimberg
2017-01-14 16:18                       ` Christoph Hellwig
     [not found]                         ` <20170114161800.GA635-jcswGhMUV9g@public.gmane.org>
2017-01-14 21:04                           ` Sagi Grimberg
2017-01-13 19:35       ` Sagi Grimberg
2017-01-12 22:42 Parav Pandit
     [not found] ` <1484260948-3227-1-git-send-email-parav-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
2017-01-12 22:45   ` Parav Pandit

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).