public inbox for u-boot@lists.denx.de
 help / color / mirror / Atom feed
* [PATCH] nvme: Fix cache alignment
@ 2021-01-30 17:53 Marek Vasut
  2021-02-02  3:55 ` Bin Meng
  0 siblings, 1 reply; 22+ messages in thread
From: Marek Vasut @ 2021-01-30 17:53 UTC (permalink / raw)
  To: u-boot

The various structures in the driver are already correctly padded and
cache aligned in memory, however the cache operations are called on
the structure sizes, which themselves might not be cache aligned. Add
the necessary rounding to fix this, which permits the nvme to work on
arm64.

Signed-off-by: Marek Vasut <marek.vasut+renesas@gmail.com>
Cc: Bin Meng <bmeng.cn@gmail.com>
---
 drivers/nvme/nvme.c | 50 +++++++++++++++++++++++++++++----------------
 1 file changed, 32 insertions(+), 18 deletions(-)

diff --git a/drivers/nvme/nvme.c b/drivers/nvme/nvme.c
index 5d6331ad34..758415a53b 100644
--- a/drivers/nvme/nvme.c
+++ b/drivers/nvme/nvme.c
@@ -53,6 +53,27 @@ struct nvme_queue {
 	unsigned long cmdid_data[];
 };
 
+static void nvme_align_dcache_range(void *start, unsigned long size,
+				    unsigned long *s, unsigned long *e)
+{
+	*s = rounddown((uintptr_t)start, ARCH_DMA_MINALIGN);
+	*e = roundup((uintptr_t)start + size, ARCH_DMA_MINALIGN);
+}
+
+static void nvme_flush_dcache_range(void *start, unsigned long size)
+{
+	unsigned long s, e;
+	nvme_align_dcache_range(start, size, &s, &e);
+	flush_dcache_range(s, e);
+}
+
+static void nvme_invalidate_dcache_range(void *start, unsigned long size)
+{
+	unsigned long s, e;
+	nvme_align_dcache_range(start, size, &s, &e);
+	invalidate_dcache_range(s, e);
+}
+
 static int nvme_wait_ready(struct nvme_dev *dev, bool enabled)
 {
 	u32 bit = enabled ? NVME_CSTS_RDY : 0;
@@ -129,8 +150,7 @@ static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
 	}
 	*prp2 = (ulong)dev->prp_pool;
 
-	flush_dcache_range((ulong)dev->prp_pool, (ulong)dev->prp_pool +
-			   dev->prp_entry_num * sizeof(u64));
+	nvme_flush_dcache_range(dev->prp_pool, dev->prp_entry_num * sizeof(u64));
 
 	return 0;
 }
@@ -144,10 +164,8 @@ static __le16 nvme_get_cmd_id(void)
 
 static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index)
 {
-	u64 start = (ulong)&nvmeq->cqes[index];
-	u64 stop = start + sizeof(struct nvme_completion);
-
-	invalidate_dcache_range(start, stop);
+	nvme_invalidate_dcache_range(&nvmeq->cqes[index],
+				     sizeof(struct nvme_completion));
 
 	return le16_to_cpu(readw(&(nvmeq->cqes[index].status)));
 }
@@ -163,8 +181,7 @@ static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
 	u16 tail = nvmeq->sq_tail;
 
 	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
-	flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
-			   (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));
+	nvme_flush_dcache_range(&nvmeq->sq_cmds[tail], sizeof(*cmd));
 
 	if (++tail == nvmeq->q_depth)
 		tail = 0;
@@ -338,8 +355,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
 	nvmeq->cq_phase = 1;
 	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
 	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(nvmeq->q_depth));
-	flush_dcache_range((ulong)nvmeq->cqes,
-			   (ulong)nvmeq->cqes + NVME_CQ_SIZE(nvmeq->q_depth));
+	nvme_flush_dcache_range(nvmeq->cqes, NVME_CQ_SIZE(nvmeq->q_depth));
 	dev->online_queues++;
 }
 
@@ -466,13 +482,13 @@ int nvme_identify(struct nvme_dev *dev, unsigned nsid,
 
 	c.identify.cns = cpu_to_le32(cns);
 
-	invalidate_dcache_range(dma_addr,
-				dma_addr + sizeof(struct nvme_id_ctrl));
+	nvme_invalidate_dcache_range((void *)dma_addr,
+				     sizeof(struct nvme_id_ctrl));
 
 	ret = nvme_submit_admin_cmd(dev, &c, NULL);
 	if (!ret)
-		invalidate_dcache_range(dma_addr,
-					dma_addr + sizeof(struct nvme_id_ctrl));
+		nvme_invalidate_dcache_range((void *)dma_addr,
+					     sizeof(struct nvme_id_ctrl));
 
 	return ret;
 }
@@ -729,8 +745,7 @@ static ulong nvme_blk_rw(struct udevice *udev, lbaint_t blknr,
 	u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
 	u64 total_lbas = blkcnt;
 
-	flush_dcache_range((unsigned long)buffer,
-			   (unsigned long)buffer + total_len);
+	nvme_flush_dcache_range(buffer, total_len);
 
 	c.rw.opcode = read ? nvme_cmd_read : nvme_cmd_write;
 	c.rw.flags = 0;
@@ -767,8 +782,7 @@ static ulong nvme_blk_rw(struct udevice *udev, lbaint_t blknr,
 	}
 
 	if (read)
-		invalidate_dcache_range((unsigned long)buffer,
-					(unsigned long)buffer + total_len);
+		nvme_invalidate_dcache_range(buffer, total_len);
 
 	return (total_len - temp_len) >> desc->log2blksz;
 }
-- 
2.29.2

^ permalink raw reply related	[flat|nested] 22+ messages in thread

end of thread, other threads:[~2021-02-08 16:30 UTC | newest]

Thread overview: 22+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-01-30 17:53 [PATCH] nvme: Fix cache alignment Marek Vasut
2021-02-02  3:55 ` Bin Meng
2021-02-02  8:05   ` Marek Vasut
2021-02-02  8:54     ` Bin Meng
2021-02-02  9:04       ` Marek Vasut
2021-02-02  9:12         ` Bin Meng
2021-02-02 16:09           ` Marek Vasut
2021-02-02 13:04   ` Andre Przywara
2021-02-02 16:08     ` Marek Vasut
2021-02-02 16:23   ` Andre Przywara
2021-02-02 21:18     ` Marek Vasut
2021-02-03 10:42       ` Andre Przywara
2021-02-03 13:08         ` Marek Vasut
2021-02-04 10:26           ` Andre Przywara
2021-02-04 16:57             ` Tom Rini
2021-02-07 18:20               ` Marek Vasut
2021-02-07 19:13                 ` Tom Rini
2021-02-08 13:32                   ` Andre Przywara
2021-02-08 15:11                     ` Bin Meng
2021-02-08 15:51                       ` Marek Vasut
2021-02-08 15:49                     ` Marek Vasut
2021-02-08 16:30                       ` Andre Przywara

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox