devicetree.vger.kernel.org archive mirror
* [PATCH 0/2] Add sdma driver for HiSilicon Ascend platform
@ 2023-08-11  3:48 Guo Mengqi
  2023-08-11  3:48 ` [PATCH 1/2] dmaengine: Add HiSilicon Ascend SDMA engine support Guo Mengqi
  2023-08-11  3:48 ` [PATCH 2/2] dt-bindings: dma: hisi: Add bindings for Hisi Ascend sdma Guo Mengqi
  0 siblings, 2 replies; 8+ messages in thread
From: Guo Mengqi @ 2023-08-11  3:48 UTC (permalink / raw)
  To: vkoul, dmaengine, robh+dt, krzysztof.kozlowski+dt, conor+dt,
	devicetree
  Cc: guomengqi3, xuqiang36

This is for the System Direct Memory Access (SDMA) hardware used by the
HiSilicon Ascend family. The DMA controller supports data transfers
between memory and memory or between memory and a device.
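
As an illustration (not part of this series), a minimal sketch of how a
generic dmaengine client could drive this controller; the channel name and
lookup are placeholders and depend on the consumer's DT bindings:

#include <linux/dmaengine.h>
#include <linux/err.h>

/* Request a channel, queue one memcpy, start it, then release the channel. */
static int sdma_memcpy_example(struct device *dev, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	int ret = 0;

	chan = dma_request_chan(dev, "memcpy");	/* name is illustrative */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx) {
		ret = -ENOMEM;
		goto out;
	}

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie)) {
		ret = -EIO;
		goto out;
	}
	dma_async_issue_pending(chan);
	/* completion is then observed via dmaengine_tx_status(chan, cookie,
	 * NULL) or a DMA_PREP_INTERRUPT callback; a real client would keep
	 * the channel instead of releasing it after one transfer */
out:
	dma_release_channel(chan);
	return ret;
}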

Guo Mengqi (2):
  dmaengine: Add HiSilicon Ascend SDMA engine support
  dt-bindings: dma: hisi: Add bindings for Hisi Ascend sdma

 .../bindings/dma/hisi,ascend-sdma.yaml        |  82 ++
 drivers/dma/Kconfig                           |   9 +
 drivers/dma/Makefile                          |   1 +
 drivers/dma/ascend_sdma.c                     | 810 ++++++++++++++++++
 4 files changed, 902 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/hisi,ascend-sdma.yaml
 create mode 100644 drivers/dma/ascend_sdma.c

-- 
2.17.1



* [PATCH 1/2] dmaengine: Add HiSilicon Ascend SDMA engine support
  2023-08-11  3:48 [PATCH 0/2] Add sdma driver for HiSilicon Ascend platform Guo Mengqi
@ 2023-08-11  3:48 ` Guo Mengqi
  2023-08-11  9:50   ` kernel test robot
                     ` (2 more replies)
  2023-08-11  3:48 ` [PATCH 2/2] dt-bindings: dma: hisi: Add bindings for Hisi Ascend sdma Guo Mengqi
  1 sibling, 3 replies; 8+ messages in thread
From: Guo Mengqi @ 2023-08-11  3:48 UTC (permalink / raw)
  To: vkoul, dmaengine, robh+dt, krzysztof.kozlowski+dt, conor+dt,
	devicetree
  Cc: guomengqi3, xuqiang36

This patch adds a driver for the HiSilicon Ascend SDMA engine.

The DMA controller can do transfers between device and memory
or from memory to memory. Currently, the controller only supports
single copy. Drivers can pass a substreamid (PASID) to the DMA
engine, which enables transfers on user-space virtual addresses.
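
As an illustration of the substreamid (PASID) flow, a rough sketch of what
a client might do; only set_sdma_channel_info() comes from this patch, the
SVA calls are the generic IOMMU API, and error handling is trimmed:

#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/sched.h>

/* Sketch: bind the caller's mm for SVA and hand the resulting PASID
 * (substreamid) to an already-requested SDMA channel. */
static int sdma_bind_current_mm(struct dma_chan *chan, struct device *dma_dev)
{
	struct iommu_sva *handle;

	handle = iommu_sva_bind_device(dma_dev, current->mm);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	set_sdma_channel_info(chan, iommu_sva_get_pasid(handle));

	/* src/dst given to dmaengine_prep_dma_memcpy() may now be user-space
	 * virtual addresses resolved through the bound mm. */
	return 0;
}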

Signed-off-by: Guo Mengqi <guomengqi3@huawei.com>
---
 drivers/dma/Kconfig       |   9 +
 drivers/dma/Makefile      |   1 +
 drivers/dma/ascend_sdma.c | 810 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 820 insertions(+)
 create mode 100644 drivers/dma/ascend_sdma.c

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 644c188d6a11..7f86fa6ddc83 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -243,6 +243,15 @@ config FSL_RAID
 	  the capability to offload memcpy, xor and pq computation
 	  for raid5/6.
 
+config HISI_ASCEND_SDMA
+	tristate "HiSilicon Ascend SDMA Engine support"
+	depends on ARM64 || COMPILE_TEST
+	depends on IOMMU_API && OF
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for HiSilicon Ascend SDMA engine.
+
 config HISI_DMA
 	tristate "HiSilicon DMA Engine support"
 	depends on ARCH_HISI || COMPILE_TEST
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index a4fd1ce29510..2052fc0594ef 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -81,6 +81,7 @@ obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
 obj-$(CONFIG_ST_FDMA) += st_fdma.o
 obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma/
 obj-$(CONFIG_INTEL_LDMA) += lgm/
+obj-$(CONFIG_HISI_ASCEND_SDMA) += ascend_sdma.o
 
 obj-y += mediatek/
 obj-y += qcom/
diff --git a/drivers/dma/ascend_sdma.c b/drivers/dma/ascend_sdma.c
new file mode 100644
index 000000000000..a17cec1f6c41
--- /dev/null
+++ b/drivers/dma/ascend_sdma.c
@@ -0,0 +1,810 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2019-2022 HiSilicon Limited. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/slab.h>
+#include "virt-dma.h"
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/iommu.h>
+
+#define SDMA_DEVICE_NAME "sdma"
+
+/* SDMA_CH_REGS */
+#define SDMAM_CH_CTRL_REG		0x0000
+#define SDMAM_CH_IIDR_REG		0x0004
+#define SDMAM_CH_TYPER_REG		0x0008
+#define SDMAM_CH_BYPASS_CTRL_REG	0x0014
+
+#define SDMAM_IRQ_STATUS_REG		0x000c
+#define SDMAM_IRQ_CTRL_REG		0x0010
+#define SDMAM_IRQ_IOC_MASK		(1U << 16)
+#define SDMAM_IRQ_IOE_MASK		(1U << 17)
+#define SDMAM_IRQ_ERR_MASK		(0xFFU << 20)
+
+#define SDMAM_CH_SQBASER_L_REG		0x0040
+#define SDMAM_CH_SQBASER_H_REG		0x0044
+#define SDMAM_CH_SQ_ATTR_REG		0x0048
+#define SDMAM_CH_SQTDBR_REG		0x004c
+#define SDMAM_CH_SQHDBR_REG		0x0050
+
+#define SDMAM_CH_CQBASER_L_REG		0x0080
+#define SDMAM_CH_CQBASER_H_REG		0x0084
+#define SDMAM_CH_CQ_ATTR_REG		0X0088
+#define SDMAM_CH_CQTDBR_REG		0x008c
+#define SDMAM_CH_CQHDBR_REG		0x0090
+
+/* SDMA_COMMON_REGS */
+#define SDMA_COMMON_DMA_AXUSER_REG0	0x0FE0
+#define SDMA_COMMON_DMA_AXUSER_REG1	0x0FE4
+#define SDMA_COMMON_DMA_AXUSER_REG2	0x0FE8
+#define SDMA_DFX_FEATURE_EN_REG		0x0FFC
+
+/* DFX REG */
+#define DFX_REG0 0x0400
+#define DFX_REG1 0x0404
+#define DFX_REG2 0x0408
+#define DFX_REG3 0x040C
+#define DFX_REG4 0x0410
+#define DFX_REG5 0x0414
+
+#define SDMA_IOMEM_SIZE			0x10000
+#define SDMA_CHANNELL_IOMEM_SIZE	0x1000
+
+#define SDMA_SQ_ENTRY_SIZE		32UL
+#define SDMA_CQ_ENTRY_SIZE		16UL
+
+/* must be a power of 2 */
+#define SDMA_SQ_LENGTH			(1U << 10)
+#define SDMA_CQ_LENGTH			(1U << 10)
+#define SDMA_SQ_SIZE			(SDMA_SQ_ENTRY_SIZE * SDMA_SQ_LENGTH)
+#define SDMA_CQ_SIZE			(SDMA_CQ_ENTRY_SIZE * SDMA_CQ_LENGTH)
+
+#define SDMA_MAX_COPY_SIZE		0x100000000UL
+#define SDMA_COPY_SIZE_MASK		0xFFFFFFFFUL
+
+#define SDMA_MAX_CHANNEL_NUM		64
+
+static unsigned int sdma_channel_iomem_size = SDMA_CHANNELL_IOMEM_SIZE;
+
+static u32 sdma_queue_count(u32 head, u32 tail, u32 len)
+{
+	return (tail - head) & (len - 1);
+}
+
+struct sdma_sq_entry {
+	u32 opcode          : 8;
+	u32 ie              : 1;
+	u32 sssv            : 1;
+	u32 dssv            : 1;
+	u32 sns             : 1;
+	u32 dns             : 1;
+	u32 qos             : 4;
+	u32 sro             : 1;
+	u32 dro             : 1;
+	u32 partid          : 4;
+	u32 mpamns          : 1;
+	u32 reserved0       : 8;
+	u32 src_streamid    : 16;
+	u32 src_substreamid : 16;
+	u32 dst_streamid    : 16;
+	u32 dst_substreamid : 16;
+	u32 length;
+	union {
+		u64 src_addr;
+		struct {
+			u32 src_addr_l;
+			u32 src_addr_h;
+		};
+	};
+	union {
+		u64 dst_addr;
+		struct {
+			u32 dst_addr_l;
+			u32 dst_addr_h;
+		};
+	};
+};
+
+struct sdma_cq_entry {
+	u32 reserved1;
+	u32 reserved2;
+	u32 sqhd      : 16;
+	u32 reserved3 : 16;
+	u32 reserved4 : 16;
+	u32 vld       : 1;
+	u32 status    : 15;
+};
+
+struct sdma_chan {
+	u16			idx;
+	u16			cq_vld;
+
+	u16			sq_head;
+	u16			sq_tail;
+	u16			cq_head;
+	u16			cq_tail;
+
+	/* must be page-aligned and contiguous physical memory */
+	struct sdma_sq_entry	*sq_base;
+	struct sdma_cq_entry	*cq_base;
+
+	/* used for discrete copy, pre-alloc the buffer */
+	unsigned long		*src_addr;
+	unsigned long		*dst_addr;
+	unsigned long		*len;
+
+	void __iomem *io_base;
+
+	int id;
+	struct virt_dma_chan vc;
+	struct sdma_dev *sdev;
+
+	struct sdma_desc *desc;
+	char *name;
+
+	int pasid;
+};
+
+#define SDMA_DEVICE_NAME_LENGTH_MAX 20
+struct sdma_dev {
+	u16		idx;
+	u16		nr_channel;
+	DECLARE_BITMAP(channel_map, SDMA_MAX_CHANNEL_NUM);
+
+	struct	platform_device	*pdev;
+
+	u32		streamid;
+	void __iomem *io_base;
+
+	char	name[SDMA_DEVICE_NAME_LENGTH_MAX];
+
+	struct	dma_device dma_dev;
+	struct	sdma_chan *channels;
+};
+
+struct sdma_desc {
+	int pasid;
+
+	struct virt_dma_desc vd;
+	struct sdma_sq_entry entry;
+};
+
+static inline struct sdma_chan *to_sdma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct sdma_chan, vc.chan);
+}
+
+static inline struct sdma_desc *to_sdma_desc(struct virt_dma_desc *vd)
+{
+	return container_of(vd, struct sdma_desc, vd);
+}
+
+/* sdma supports sva transfer via iommu.
+ * client must first set the pasid.
+ */
+void set_sdma_channel_info(struct dma_chan *c, int pasid)
+{
+	struct sdma_chan *sc = to_sdma_chan(c);
+
+	sc->pasid = pasid;
+}
+EXPORT_SYMBOL_GPL(set_sdma_channel_info);
+
+struct sdma_hardware_info {
+	unsigned long	channel_map;
+	u64		base_addr; /* physical address */
+};
+
+#define CHANNEL_MAP_PROP "ascend_sdma_channel_map"
+#define CHANNEL_IOMEM_SIZE "ascend_sdma_channel_iomem_size"
+
+static int of_sdma_collect_info(struct platform_device *pdev, struct sdma_hardware_info *info)
+{
+	int ret;
+	u64 channel_map;
+	struct resource res;
+	struct device_node *np = pdev->dev.of_node;
+
+	ret = of_property_read_u64(np, CHANNEL_MAP_PROP, &channel_map);
+	if (ret < 0) {
+		pr_err("get " CHANNEL_MAP_PROP " info from dtb failed, %d\n", ret);
+		return ret;
+	}
+	info->channel_map = channel_map;
+
+	ret = of_property_read_u32(np, CHANNEL_IOMEM_SIZE, &sdma_channel_iomem_size);
+	if (ret < 0) {
+		pr_info("get " CHANNEL_IOMEM_SIZE " failed, using default size\n");
+		sdma_channel_iomem_size = SDMA_CHANNELL_IOMEM_SIZE;
+	}
+
+	ret = of_address_to_resource(np, 0, &res);
+	if (ret < 0) {
+		pr_err("get io_base info from dtb failed, %d\n", ret);
+		return ret;
+	}
+	info->base_addr = res.start;
+	if (resource_size(&res) != SDMA_IOMEM_SIZE)
+		pr_warn("reg size %#llx check failed, use %#x\n",
+				resource_size(&res), SDMA_IOMEM_SIZE);
+
+	return 0;
+}
+
+static int sdma_channel_alloc_sq_cq(struct sdma_chan *pchan)
+{
+	unsigned long *buf;
+
+	pchan->sq_base = (struct sdma_sq_entry *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+			get_order(SDMA_SQ_SIZE));
+	if (!pchan->sq_base) {
+		pr_err("channel%d: alloc sq_memory failed\n", pchan->idx);
+		return -ENOMEM;
+	}
+
+	pchan->cq_base = (struct sdma_cq_entry *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+			get_order(SDMA_CQ_SIZE));
+	if (!pchan->cq_base) {
+		pr_err("channel%d: alloc cq_memory failed\n", pchan->idx);
+		free_pages((unsigned long)pchan->sq_base, get_order(SDMA_SQ_SIZE));
+		return -ENOMEM;
+	}
+
+	buf = vmalloc(sizeof(unsigned long) * SDMA_SQ_LENGTH * 3);
+	if (!buf) {
+		pr_err("channel%d: alloc user_buf failed\n", pchan->idx);
+		free_pages((unsigned long)pchan->sq_base, get_order(SDMA_SQ_SIZE));
+		free_pages((unsigned long)pchan->cq_base, get_order(SDMA_CQ_SIZE));
+		return -ENOMEM;
+	}
+	pchan->src_addr = buf;
+	pchan->dst_addr = buf + SDMA_SQ_LENGTH;
+	pchan->len      = buf + SDMA_SQ_LENGTH * 2;
+
+	return 0;
+}
+
+static void sdma_free_all_sq_cq(struct sdma_dev *sdev)
+{
+	int i;
+	struct sdma_chan *pchan;
+
+	for (i = sdev->nr_channel - 1; i >= 0; i--) {
+		pchan = sdev->channels + i;
+		free_pages((unsigned long)pchan->sq_base, get_order(SDMA_SQ_SIZE));
+		free_pages((unsigned long)pchan->cq_base, get_order(SDMA_CQ_SIZE));
+		vfree(pchan->src_addr);
+	}
+}
+
+static void sdma_channel_set_val_mask_shift(struct sdma_chan *pchan,
+		int reg, u32 val, u32 mask, u32 shift)
+{
+	u32 reg_val = readl(pchan->io_base + reg);
+
+	reg_val = (reg_val & ~(mask << shift)) | ((val & mask) << shift);
+	writel(reg_val, pchan->io_base + reg);
+}
+
+static u32 sdma_channel_get_val_mask_shift(struct sdma_chan *pchan,
+		int reg, u32 mask, u32 shift)
+{
+	u32 reg_val = readl(pchan->io_base + reg);
+
+	return (reg_val >> shift) & mask;
+}
+
+static void sdma_channel_set_pause(struct sdma_chan *pchan)
+{
+	sdma_channel_set_val_mask_shift(pchan, SDMAM_CH_CTRL_REG, 1, 1, 1);
+}
+
+static bool sdma_channel_is_paused(struct sdma_chan *pchan)
+{
+	return sdma_channel_get_val_mask_shift(pchan, SDMAM_CH_CTRL_REG, 0xF, 16) == 3;
+}
+
+static bool sdma_channel_is_idle(struct sdma_chan *pchan)
+{
+	return sdma_channel_get_val_mask_shift(pchan, SDMAM_CH_CTRL_REG, 0xF, 16) == 0;
+}
+
+static bool sdma_channel_is_quiescent(struct sdma_chan *pchan)
+{
+	return sdma_channel_get_val_mask_shift(pchan, SDMAM_CH_CTRL_REG, 1, 31) == 1;
+}
+
+static void sdma_channel_write_reset(struct sdma_chan *pchan)
+{
+	sdma_channel_set_val_mask_shift(pchan, SDMAM_CH_CTRL_REG, 1, 1, 3);
+}
+
+static void sdma_channel_enable(struct sdma_chan *pchan)
+{
+	sdma_channel_set_val_mask_shift(pchan, SDMAM_CH_CTRL_REG, 1, 1, 0);
+}
+
+static void sdma_channel_set_doorbell_mode(struct sdma_chan *pchan)
+{
+	sdma_channel_set_val_mask_shift(pchan, SDMAM_CH_CTRL_REG, 0, 1, 9);
+}
+
+static void sdma_channel_disable(struct sdma_chan *pchan)
+{
+	sdma_channel_set_val_mask_shift(pchan, SDMAM_CH_CTRL_REG, 0, 1, 0);
+}
+
+static void sdma_channel_set_sq_size(struct sdma_chan *pchan, u32 size)
+{
+	sdma_channel_set_val_mask_shift(pchan, SDMAM_CH_SQ_ATTR_REG, size, 0xFFFF, 0);
+}
+
+static void sdma_channel_set_cq_size(struct sdma_chan *pchan, u32 size)
+{
+	sdma_channel_set_val_mask_shift(pchan, SDMAM_CH_CQ_ATTR_REG, size, 0xFFFF, 0);
+}
+
+static void sdma_channel_set_sq_tail(struct sdma_chan *pchan, u32 val)
+{
+	sdma_channel_set_val_mask_shift(pchan, SDMAM_CH_SQTDBR_REG, val, 0xFFFF, 0);
+}
+
+static u32 sdma_channel_get_sq_head(struct sdma_chan *pchan)
+{
+	return sdma_channel_get_val_mask_shift(pchan, SDMAM_CH_SQHDBR_REG, 0xFFFF, 0);
+}
+
+static void sdma_channel_set_cq_head(struct sdma_chan *pchan, u32 val)
+{
+	sdma_channel_set_val_mask_shift(pchan, SDMAM_CH_CQHDBR_REG, val, 0xFFFF, 0);
+}
+
+static u32 sdma_channel_get_cq_tail(struct sdma_chan *pchan)
+{
+	return sdma_channel_get_val_mask_shift(pchan, SDMAM_CH_CQTDBR_REG, 0xFFFF, 0);
+}
+
+static void sdma_channel_init(struct sdma_chan *pchan)
+{
+	void __iomem *io_base = pchan->io_base;
+	u64 sq_addr = virt_to_phys(pchan->sq_base);
+	u64 cq_addr = virt_to_phys(pchan->cq_base);
+
+	writel(sq_addr & 0xFFFFFFFF, io_base + SDMAM_CH_SQBASER_L_REG);
+	writel(sq_addr >> 32, io_base + SDMAM_CH_SQBASER_H_REG);
+	writel(cq_addr & 0xFFFFFFFF, io_base + SDMAM_CH_CQBASER_L_REG);
+	writel(cq_addr >> 32, io_base + SDMAM_CH_CQBASER_H_REG);
+
+	sdma_channel_set_sq_size(pchan, SDMA_SQ_LENGTH - 1);
+	sdma_channel_set_cq_size(pchan, SDMA_CQ_LENGTH - 1);
+	sdma_channel_set_sq_tail(pchan, 0);
+	sdma_channel_set_cq_head(pchan, 0);
+
+	pchan->cq_vld = 1;
+	sdma_channel_set_doorbell_mode(pchan);
+	sdma_channel_enable(pchan);
+}
+
+static void sdma_channel_reset(struct sdma_chan *pchan)
+{
+	int i = 0;
+
+	sdma_channel_set_pause(pchan);
+	while (!sdma_channel_is_paused(pchan))
+		if (++i > 10) {
+			pr_warn("the channel cannot get paused\n");
+			break;
+		}
+
+	i = 0;
+	while (!sdma_channel_is_quiescent(pchan))
+		if (++i > 10) {
+			pr_warn("the channel cannot get quiescent\n");
+			break;
+		}
+
+	i = 0;
+	sdma_channel_write_reset(pchan);
+	while (!sdma_channel_is_idle(pchan))
+		if (++i > 10) {
+			pr_warn("the channel cannot get idle\n");
+			break;
+		}
+	sdma_channel_disable(pchan);
+
+	pchan->sq_head = pchan->sq_tail = pchan->cq_head = pchan->cq_tail = 0;
+	sdma_channel_init(pchan);
+}
+
+static void sdma_destroy_channels(struct sdma_dev *sdev)
+{
+	sdma_free_all_sq_cq(sdev);
+}
+
+static void sdma_desc_free(struct virt_dma_desc *vd)
+{
+	kfree(to_sdma_desc(vd));
+}
+
+static int sdma_init_channels(struct sdma_dev *sdev, struct sdma_hardware_info *info)
+{
+	int ret = 0;
+	int i, nr_channel;
+	struct sdma_chan *pchan;
+	struct device *dev = &sdev->pdev->dev;
+
+	nr_channel = bitmap_weight(&info->channel_map, BITS_PER_LONG);
+
+	if (!nr_channel || nr_channel > SDMA_MAX_CHANNEL_NUM) {
+		pr_err("channel count (%d) invalid\n", nr_channel);
+		return -ENODEV;
+	}
+
+	sdev->channels = devm_kcalloc(dev, nr_channel,
+			sizeof(*sdev->channels), GFP_KERNEL);
+	if (!sdev->channels)
+		return -ENOMEM;
+
+	sdev->nr_channel = 0;
+	for (i = 0; sdev->nr_channel < nr_channel; i++) {
+		if (!(info->channel_map & (1UL << i)))
+			continue;
+
+		pchan = sdev->channels + sdev->nr_channel;
+		pchan->idx = sdev->nr_channel;
+		pchan->sdev = sdev;
+		pchan->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
+				dev_name(dev), i);
+
+		ret = sdma_channel_alloc_sq_cq(pchan);
+		if (ret < 0)
+			goto err_out;
+
+		sdev->nr_channel++;
+		pchan->io_base = sdev->io_base + i * sdma_channel_iomem_size;
+		vchan_init(&pchan->vc, &sdev->dma_dev);
+		pchan->vc.desc_free = sdma_desc_free;
+
+		sdma_channel_disable(pchan);
+		sdma_channel_init(pchan);
+
+		pr_info("hardware channel%d probed, idx %d\n", i, pchan->idx);
+	}
+
+	bitmap_set(sdev->channel_map, 0, sdev->nr_channel);
+
+	return 0;
+
+err_out:
+	sdma_destroy_channels(sdev);
+
+	return ret;
+}
+
+struct dma_async_tx_descriptor *sdma_prep_dma_memcpy(
+		struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
+		size_t len, unsigned long flags)
+{
+	struct sdma_chan *sc = to_sdma_chan(chan);
+	struct sdma_desc *d;
+
+	d = kzalloc(sizeof(*d), GFP_NOWAIT);
+	if (!d)
+		return NULL;
+
+	d->pasid = sc->pasid;
+
+	d->entry.src_addr = src;
+	d->entry.dst_addr = dst;
+	d->entry.length = len;
+
+	return vchan_tx_prep(&sc->vc, &d->vd, flags);
+}
+
+static void sdma_error_handle(struct sdma_chan *sc)
+{
+	u32 cq_tail = sdma_channel_get_cq_tail(sc);
+
+	if (cq_tail < sc->cq_head)
+		sc->cq_vld ^= 1;
+	sc->cq_head = sc->cq_tail = cq_tail;
+	sc->sq_head = sdma_channel_get_sq_head(sc);
+}
+
+int sdma_terminate_all(struct dma_chan *chan)
+{
+	sdma_error_handle(to_sdma_chan(chan));
+	sdma_channel_reset(to_sdma_chan(chan));
+
+	return 0;
+}
+
+void sdma_synchronize(struct dma_chan *chan)
+{
+	struct sdma_chan *sc = to_sdma_chan(chan);
+
+	vchan_synchronize(&sc->vc);
+}
+
+enum dma_status sdma_tx_status(struct dma_chan *chan,
+		dma_cookie_t cookie,
+		struct dma_tx_state *txstate)
+{
+	u32 cq_head, cq_tail, cq_count;
+	u32 irq_reg, ch_ctrl_reg;
+	struct sdma_cq_entry *cq_entry;
+	struct sdma_chan *sc = to_sdma_chan(chan);
+	enum dma_status ret = DMA_IN_PROGRESS;
+
+	dsb(sy);
+	irq_reg = readl(sc->io_base + SDMAM_IRQ_STATUS_REG);
+	ch_ctrl_reg = readl(sc->io_base + SDMAM_CH_CTRL_REG);
+
+	if (irq_reg & SDMAM_IRQ_IOC_MASK) {
+		writel(irq_reg, sc->io_base + SDMAM_IRQ_STATUS_REG);
+
+		cq_head = sc->cq_head;
+		cq_tail = sdma_channel_get_cq_tail(sc);
+		cq_count = sdma_queue_count(cq_head, cq_tail, SDMA_CQ_LENGTH);
+		if (!cq_count) {
+			pr_err("unexpected complete irq\n");
+			ret = DMA_ERROR;
+			goto out;
+		}
+
+		for (; cq_count; cq_count--) {
+			cq_entry = sc->cq_base + cq_head;
+			if (cq_entry->vld != sc->cq_vld || cq_entry->status) {
+				pr_err("cq_entry invalid, vld: %u, cq_vld: %u, status: %u\n",
+						cq_entry->vld, sc->cq_vld, cq_entry->status);
+				ret = DMA_ERROR;
+			}
+			if (++cq_head == SDMA_CQ_LENGTH) {
+				sc->cq_vld ^= 1;
+				cq_head = 0;
+			}
+		}
+
+		sc->cq_head = cq_head;
+		sdma_channel_set_cq_head(sc, cq_head);
+		sc->sq_head = sdma_channel_get_sq_head(sc);
+		sc->cq_tail = cq_tail;
+
+		if (ret != DMA_ERROR) {
+			ret = DMA_COMPLETE;
+			vchan_cookie_complete(&sc->desc->vd);
+		}
+	} else if (irq_reg & SDMAM_IRQ_IOE_MASK) {
+		writel(irq_reg, sc->io_base + SDMAM_IRQ_STATUS_REG);
+		pr_err("sdma ioe interrupt occur, status: %#x\n", irq_reg);
+		sdma_error_handle(sc);
+
+		ret = DMA_ERROR;
+	}
+
+out:
+	return ret;
+}
+
+static void sdma_start_transfer(struct sdma_chan *pchan)
+{
+	u16 sq_tail = pchan->sq_tail;
+	struct sdma_sq_entry *entry = pchan->sq_base + sq_tail;
+	struct sdma_desc *desc;
+	struct virt_dma_desc *vd;
+
+	vd = vchan_next_desc(&pchan->vc);
+	if (!vd) {
+		pchan->desc = NULL;
+		return;
+	}
+	list_del(&vd->node);
+	desc = to_sdma_desc(vd);
+	pchan->desc = desc;
+
+	memcpy(entry, &desc->entry, sizeof(struct sdma_sq_entry));
+
+	entry->src_streamid = pchan->sdev->streamid;
+	entry->dst_streamid = pchan->sdev->streamid;
+
+	entry->sns          = 1;
+	entry->dns          = 1;
+	entry->ie           = 0;
+	entry->partid       = 0;
+	entry->mpamns       = 1;
+	if (pchan->pasid) {
+		entry->sssv            = 1;
+		entry->dssv            = 1;
+		entry->src_substreamid = pchan->pasid;
+		entry->dst_substreamid = pchan->pasid;
+	} else {
+		entry->sssv = 0;
+		entry->dssv = 0;
+	}
+	sq_tail = (sq_tail + 1) & (SDMA_SQ_LENGTH - 1);
+	entry->ie = 1;
+
+	dmb(sy);
+	sdma_channel_set_sq_tail(pchan, sq_tail);
+	pchan->sq_tail = sq_tail;
+}
+
+void sdma_issue_pending(struct dma_chan *chan)
+{
+	struct sdma_chan *sc = to_sdma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&sc->vc.lock, flags);
+
+	if (vchan_issue_pending(&sc->vc) && !sc->desc)
+		sdma_start_transfer(sc);
+
+	spin_unlock_irqrestore(&sc->vc.lock, flags);
+}
+
+void sdma_free_chan_resources(struct dma_chan *chan)
+{
+	struct sdma_chan *sc = to_sdma_chan(chan);
+
+	sc->desc = NULL;
+	sc->pasid = 0;
+}
+
+#define SDMA_BUSWIDTHS 1024
+static void sdma_init_dma_device(struct dma_device *dma_dev, struct device *dev)
+{
+	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+
+	dma_dev->device_issue_pending = sdma_issue_pending;
+	dma_dev->device_tx_status = sdma_tx_status;
+	dma_dev->device_terminate_all = sdma_terminate_all;
+	dma_dev->device_synchronize = sdma_synchronize;
+	dma_dev->device_free_chan_resources = sdma_free_chan_resources;
+	dma_dev->device_prep_dma_memcpy = sdma_prep_dma_memcpy;
+
+	dma_dev->src_addr_widths = SDMA_BUSWIDTHS;
+	dma_dev->dst_addr_widths = SDMA_BUSWIDTHS;
+	dma_dev->directions = BIT(DMA_MEM_TO_MEM);
+
+	dma_dev->dev = dev;
+
+	INIT_LIST_HEAD(&dma_dev->channels);
+}
+
+static int sdma_device_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct device *dev;
+	struct sdma_dev *sdev;
+	struct sdma_hardware_info info;
+
+	dev = &pdev->dev;
+
+	if (!pdev->dev.bus) {
+		pr_debug("the sdma dev bus is NULL\n");
+		return -EPROBE_DEFER;
+	}
+
+	if (!pdev->dev.bus->iommu_ops) {
+		pr_debug("defer probe sdma device\n");
+		return -EPROBE_DEFER;
+	}
+
+	sdev = devm_kzalloc(dev, sizeof(*sdev), GFP_KERNEL);
+	if (!sdev) {
+		pr_err("alloc sdma_device failed\n");
+		return -ENOMEM;
+	}
+
+	sdev->pdev = pdev;
+	dev_set_drvdata(&pdev->dev, sdev);
+
+	ret = of_sdma_collect_info(pdev, &info);
+	if (ret < 0) {
+		pr_err("collect device info failed, %d\n", ret);
+		return ret;
+	}
+
+	sdev->io_base = ioremap(info.base_addr, SDMA_IOMEM_SIZE);
+	if (!sdev->io_base) {
+		pr_err("ioremap failed\n");
+		ret = -ENOMEM;
+		return ret;
+	}
+
+	/* Fill in dmaengine */
+	sdma_init_dma_device(&sdev->dma_dev, dev);
+
+	ret = sdma_init_channels(sdev, &info);
+	if (ret < 0)
+		goto unmap_iobase;
+
+	ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
+	if (ret) {
+		pr_err("iommu failed to init iopf, %d\n", ret);
+		goto destroy_channels;
+	}
+
+	ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+	if (ret) {
+		pr_err("iommu failed to init sva, %d\n", ret);
+		goto disable_iopf;
+	}
+
+	sdev->streamid = pdev->dev.iommu->fwspec->ids[0];
+
+	snprintf(sdev->name, SDMA_DEVICE_NAME_LENGTH_MAX, "sdma%d", sdev->idx);
+	pr_info("%s device probe success\n", sdev->name);
+
+	ret = dma_async_device_register(&sdev->dma_dev);
+	if (ret) {
+		dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
+		goto disable_sva;
+	}
+
+	return 0;
+
+disable_sva:
+	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+disable_iopf:
+	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
+destroy_channels:
+	sdma_destroy_channels(sdev);
+unmap_iobase:
+	iounmap(sdev->io_base);
+	return ret;
+}
+
+static int sdma_device_remove(struct platform_device *pdev)
+{
+	struct sdma_dev *psdma_dev = dev_get_drvdata(&pdev->dev);
+
+	dma_async_device_unregister(&psdma_dev->dma_dev);
+
+	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
+
+	sdma_destroy_channels(psdma_dev);
+
+	iounmap(psdma_dev->io_base);
+
+	return 0;
+}
+
+static const struct of_device_id sdma_of_match[] = {
+	{ .compatible = "hisilicon,sdma" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, sdma_of_match);
+
+static struct platform_driver sdma_driver = {
+	.probe    = sdma_device_probe,
+	.remove   = sdma_device_remove,
+	.driver   = {
+		.name           = SDMA_DEVICE_NAME,
+		.of_match_table = sdma_of_match,
+	},
+};
+
+static int __init sdma_driver_init(void)
+{
+	return platform_driver_register(&sdma_driver);
+}
+module_init(sdma_driver_init);
+
+static void sdma_driver_exit(void)
+{
+	platform_driver_unregister(&sdma_driver);
+}
+module_exit(sdma_driver_exit);
+
+MODULE_DESCRIPTION("SDMA controller driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Wang Wensheng <wangwensheng4@huawei.com>");
+MODULE_AUTHOR("Guo Mengqi <guomengqi3@huawei.com>");
-- 
2.17.1



* [PATCH 2/2] dt-bindings: dma: hisi: Add bindings for Hisi Ascend sdma
  2023-08-11  3:48 [PATCH 0/2] Add sdma driver for HiSilicon Ascend platform Guo Mengqi
  2023-08-11  3:48 ` [PATCH 1/2] dmaengine: Add HiSilicon Ascend SDMA engine support Guo Mengqi
@ 2023-08-11  3:48 ` Guo Mengqi
  2023-08-14  8:51   ` Krzysztof Kozlowski
  1 sibling, 1 reply; 8+ messages in thread
From: Guo Mengqi @ 2023-08-11  3:48 UTC (permalink / raw)
  To: vkoul, dmaengine, robh+dt, krzysztof.kozlowski+dt, conor+dt,
	devicetree
  Cc: guomengqi3, xuqiang36

Add device-tree binding documentation for the Hisi Ascend sdma
controller.

Signed-off-by: Guo Mengqi <guomengqi3@huawei.com>
---
 .../bindings/dma/hisi,ascend-sdma.yaml        | 82 +++++++++++++++++++
 1 file changed, 82 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/hisi,ascend-sdma.yaml

diff --git a/Documentation/devicetree/bindings/dma/hisi,ascend-sdma.yaml b/Documentation/devicetree/bindings/dma/hisi,ascend-sdma.yaml
new file mode 100644
index 000000000000..beb2b3597f4d
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/hisi,ascend-sdma.yaml
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/hisi,ascend-sdma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: HISI Ascend System DMA (SDMA) controller
+
+description: |
+  The Ascend SDMA controller is used for transferring data
+  in system memory. It utilizes the IOMMU SVA feature and accepts
+  virtual addresses from user processes.
+
+maintainers:
+  - Guo Mengqi <guomengqi3@huawei.com>
+
+allOf:
+  - $ref: dma-controller.yaml#
+
+properties:
+  compatible:
+    const: hisilicon,sdma
+
+  reg:
+    maxItems: 1
+
+  '#dma-cells':
+    const: 1
+    description:
+      Clients specify a single cell with channel number.
+
+  ascend_sdma_channel_map:
+    description: |
+      Bitmap; each set bit stands for a channel that this system
+      is allowed to use. Maximum 32 bits.
+    maximum: 0xffffffff
+
+  ascend_sdma_channel_iomem_size:
+    description: |
+      Size of each channel's register region; it depends on the
+      platform. There are currently two possible values. A default
+      value is used if the property is not set.
+      - enum:
+        - 0x400
+        - 0x1000
+
+  iommus:
+    maxItems: 1
+
+  pasid-num-bits:
+    description: |
+      The SDMA utilizes the IOMMU SVA feature to transfer user-space
+      data. It acts as a basic DMA controller if not bound to user space.
+    const: 0x10
+
+  dma-coherent: true
+
+  dma-can-stall: true
+
+required:
+  - compatible
+  - reg
+  - ascend_sdma_channel_map
+  - '#dma-cells'
+  - iommus
+
+additionalProperties: false
+
+examples:
+  - |
+    sdma: dma-controller@880E0000 {
+      compatible = "hisilicon,sdma";
+        reg = <0x880e0000 0x10000>;
+        ascend_sdma_channel_map = <0xff00>;
+        iommus = <&smmu 0x7f46>;
+        pasid-num-bits = <0x10>;
+        dma-coherent;
+        dma-can-stall;
+        #dma-cells = <1>;
+    };
+
+...
-- 
2.17.1



* Re: [PATCH 1/2] dmaengine: Add HiSilicon Ascend SDMA engine support
  2023-08-11  3:48 ` [PATCH 1/2] dmaengine: Add HiSilicon Ascend SDMA engine support Guo Mengqi
@ 2023-08-11  9:50   ` kernel test robot
  2023-08-11 11:55   ` kernel test robot
  2023-08-14  8:54   ` Krzysztof Kozlowski
  2 siblings, 0 replies; 8+ messages in thread
From: kernel test robot @ 2023-08-11  9:50 UTC (permalink / raw)
  To: Guo Mengqi, vkoul, dmaengine, robh+dt, krzysztof.kozlowski+dt,
	conor+dt, devicetree
  Cc: oe-kbuild-all, guomengqi3, xuqiang36

Hi Guo,

kernel test robot noticed the following build warnings:

[auto build test WARNING on vkoul-dmaengine/next]
[also build test WARNING on linus/master v6.5-rc5 next-20230809]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Guo-Mengqi/dmaengine-Add-HiSilicon-Ascend-SDMA-engine-support/20230811-115823
base:   https://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine.git next
patch link:    https://lore.kernel.org/r/20230811034822.107229-2-guomengqi3%40huawei.com
patch subject: [PATCH 1/2] dmaengine: Add HiSilicon Ascend SDMA engine support
config: csky-randconfig-r003-20230811 (https://download.01.org/0day-ci/archive/20230811/202308111712.7M4TmcC2-lkp@intel.com/config)
compiler: csky-linux-gcc (GCC) 12.3.0
reproduce: (https://download.01.org/0day-ci/archive/20230811/202308111712.7M4TmcC2-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202308111712.7M4TmcC2-lkp@intel.com/

All warnings (new ones prefixed by >>):

>> drivers/dma/ascend_sdma.c:191:6: warning: no previous prototype for 'set_sdma_channel_info' [-Wmissing-prototypes]
     191 | void set_sdma_channel_info(struct dma_chan *c, int pasid)
         |      ^~~~~~~~~~~~~~~~~~~~~
   In file included from include/linux/kernel.h:30,
                    from drivers/dma/ascend_sdma.c:4:
   drivers/dma/ascend_sdma.c: In function 'of_sdma_collect_info':
>> include/linux/kern_levels.h:5:25: warning: format '%llx' expects argument of type 'long long unsigned int', but argument 2 has type 'resource_size_t' {aka 'unsigned int'} [-Wformat=]
       5 | #define KERN_SOH        "\001"          /* ASCII Start Of Header */
         |                         ^~~~~~
   include/linux/printk.h:427:25: note: in definition of macro 'printk_index_wrap'
     427 |                 _p_func(_fmt, ##__VA_ARGS__);                           \
         |                         ^~~~
   include/linux/printk.h:508:9: note: in expansion of macro 'printk'
     508 |         printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
         |         ^~~~~~
   include/linux/kern_levels.h:12:25: note: in expansion of macro 'KERN_SOH'
      12 | #define KERN_WARNING    KERN_SOH "4"    /* warning conditions */
         |                         ^~~~~~~~
   include/linux/printk.h:508:16: note: in expansion of macro 'KERN_WARNING'
     508 |         printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
         |                ^~~~~~~~~~~~
   drivers/dma/ascend_sdma.c:234:17: note: in expansion of macro 'pr_warn'
     234 |                 pr_warn("reg size %#llx check failed, use %#x\n",
         |                 ^~~~~~~
   drivers/dma/ascend_sdma.c: At top level:
>> drivers/dma/ascend_sdma.c:490:33: warning: no previous prototype for 'sdma_prep_dma_memcpy' [-Wmissing-prototypes]
     490 | struct dma_async_tx_descriptor *sdma_prep_dma_memcpy(
         |                                 ^~~~~~~~~~~~~~~~~~~~
>> drivers/dma/ascend_sdma.c:520:5: warning: no previous prototype for 'sdma_terminate_all' [-Wmissing-prototypes]
     520 | int sdma_terminate_all(struct dma_chan *chan)
         |     ^~~~~~~~~~~~~~~~~~
>> drivers/dma/ascend_sdma.c:528:6: warning: no previous prototype for 'sdma_synchronize' [-Wmissing-prototypes]
     528 | void sdma_synchronize(struct dma_chan *chan)
         |      ^~~~~~~~~~~~~~~~
>> drivers/dma/ascend_sdma.c:535:17: warning: no previous prototype for 'sdma_tx_status' [-Wmissing-prototypes]
     535 | enum dma_status sdma_tx_status(struct dma_chan *chan,
         |                 ^~~~~~~~~~~~~~
   drivers/dma/ascend_sdma.c: In function 'sdma_tx_status':
   drivers/dma/ascend_sdma.c:545:9: error: implicit declaration of function 'dsb' [-Werror=implicit-function-declaration]
     545 |         dsb(sy);
         |         ^~~
   drivers/dma/ascend_sdma.c:545:13: error: 'sy' undeclared (first use in this function); did you mean 'sc'?
     545 |         dsb(sy);
         |             ^~
         |             sc
   drivers/dma/ascend_sdma.c:545:13: note: each undeclared identifier is reported only once for each function it appears in
>> drivers/dma/ascend_sdma.c:540:22: warning: variable 'ch_ctrl_reg' set but not used [-Wunused-but-set-variable]
     540 |         u32 irq_reg, ch_ctrl_reg;
         |                      ^~~~~~~~~~~
   drivers/dma/ascend_sdma.c: In function 'sdma_start_transfer':
   drivers/dma/ascend_sdma.c:633:9: error: implicit declaration of function 'dmb'; did you mean 'rmb'? [-Werror=implicit-function-declaration]
     633 |         dmb(sy);
         |         ^~~
         |         rmb
   drivers/dma/ascend_sdma.c:633:13: error: 'sy' undeclared (first use in this function); did you mean 's8'?
     633 |         dmb(sy);
         |             ^~
         |             s8
   drivers/dma/ascend_sdma.c: At top level:
>> drivers/dma/ascend_sdma.c:638:6: warning: no previous prototype for 'sdma_issue_pending' [-Wmissing-prototypes]
     638 | void sdma_issue_pending(struct dma_chan *chan)
         |      ^~~~~~~~~~~~~~~~~~
>> drivers/dma/ascend_sdma.c:651:6: warning: no previous prototype for 'sdma_free_chan_resources' [-Wmissing-prototypes]
     651 | void sdma_free_chan_resources(struct dma_chan *chan)
         |      ^~~~~~~~~~~~~~~~~~~~~~~~
   cc1: some warnings being treated as errors


vim +/set_sdma_channel_info +191 drivers/dma/ascend_sdma.c

   187	
   188	/* sdma supports sva transfer via iommu.
   189	 * client must first set the pasid.
   190	 */
 > 191	void set_sdma_channel_info(struct dma_chan *c, int pasid)
   192	{
   193		struct sdma_chan *sc = to_sdma_chan(c);
   194	
   195		sc->pasid = pasid;
   196	}
   197	EXPORT_SYMBOL_GPL(set_sdma_channel_info);
   198	

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki


* Re: [PATCH 1/2] dmaengine: Add HiSilicon Ascend SDMA engine support
  2023-08-11  3:48 ` [PATCH 1/2] dmaengine: Add HiSilicon Ascend SDMA engine support Guo Mengqi
  2023-08-11  9:50   ` kernel test robot
@ 2023-08-11 11:55   ` kernel test robot
  2023-08-14  8:54   ` Krzysztof Kozlowski
  2 siblings, 0 replies; 8+ messages in thread
From: kernel test robot @ 2023-08-11 11:55 UTC (permalink / raw)
  To: Guo Mengqi, vkoul, dmaengine, robh+dt, krzysztof.kozlowski+dt,
	conor+dt, devicetree
  Cc: oe-kbuild-all, guomengqi3, xuqiang36

Hi Guo,

kernel test robot noticed the following build errors:

[auto build test ERROR on vkoul-dmaengine/next]
[also build test ERROR on linus/master v6.5-rc5 next-20230809]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Guo-Mengqi/dmaengine-Add-HiSilicon-Ascend-SDMA-engine-support/20230811-115823
base:   https://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine.git next
patch link:    https://lore.kernel.org/r/20230811034822.107229-2-guomengqi3%40huawei.com
patch subject: [PATCH 1/2] dmaengine: Add HiSilicon Ascend SDMA engine support
config: csky-randconfig-r003-20230811 (https://download.01.org/0day-ci/archive/20230811/202308111941.5SysB2x2-lkp@intel.com/config)
compiler: csky-linux-gcc (GCC) 12.3.0
reproduce: (https://download.01.org/0day-ci/archive/20230811/202308111941.5SysB2x2-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202308111941.5SysB2x2-lkp@intel.com/

All errors (new ones prefixed by >>):

   drivers/dma/ascend_sdma.c:191:6: warning: no previous prototype for 'set_sdma_channel_info' [-Wmissing-prototypes]
     191 | void set_sdma_channel_info(struct dma_chan *c, int pasid)
         |      ^~~~~~~~~~~~~~~~~~~~~
   In file included from include/linux/kernel.h:30,
                    from drivers/dma/ascend_sdma.c:4:
   drivers/dma/ascend_sdma.c: In function 'of_sdma_collect_info':
   include/linux/kern_levels.h:5:25: warning: format '%llx' expects argument of type 'long long unsigned int', but argument 2 has type 'resource_size_t' {aka 'unsigned int'} [-Wformat=]
       5 | #define KERN_SOH        "\001"          /* ASCII Start Of Header */
         |                         ^~~~~~
   include/linux/printk.h:427:25: note: in definition of macro 'printk_index_wrap'
     427 |                 _p_func(_fmt, ##__VA_ARGS__);                           \
         |                         ^~~~
   include/linux/printk.h:508:9: note: in expansion of macro 'printk'
     508 |         printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
         |         ^~~~~~
   include/linux/kern_levels.h:12:25: note: in expansion of macro 'KERN_SOH'
      12 | #define KERN_WARNING    KERN_SOH "4"    /* warning conditions */
         |                         ^~~~~~~~
   include/linux/printk.h:508:16: note: in expansion of macro 'KERN_WARNING'
     508 |         printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
         |                ^~~~~~~~~~~~
   drivers/dma/ascend_sdma.c:234:17: note: in expansion of macro 'pr_warn'
     234 |                 pr_warn("reg size %#llx check failed, use %#x\n",
         |                 ^~~~~~~
   drivers/dma/ascend_sdma.c: At top level:
   drivers/dma/ascend_sdma.c:490:33: warning: no previous prototype for 'sdma_prep_dma_memcpy' [-Wmissing-prototypes]
     490 | struct dma_async_tx_descriptor *sdma_prep_dma_memcpy(
         |                                 ^~~~~~~~~~~~~~~~~~~~
   drivers/dma/ascend_sdma.c:520:5: warning: no previous prototype for 'sdma_terminate_all' [-Wmissing-prototypes]
     520 | int sdma_terminate_all(struct dma_chan *chan)
         |     ^~~~~~~~~~~~~~~~~~
   drivers/dma/ascend_sdma.c:528:6: warning: no previous prototype for 'sdma_synchronize' [-Wmissing-prototypes]
     528 | void sdma_synchronize(struct dma_chan *chan)
         |      ^~~~~~~~~~~~~~~~
   drivers/dma/ascend_sdma.c:535:17: warning: no previous prototype for 'sdma_tx_status' [-Wmissing-prototypes]
     535 | enum dma_status sdma_tx_status(struct dma_chan *chan,
         |                 ^~~~~~~~~~~~~~
   drivers/dma/ascend_sdma.c: In function 'sdma_tx_status':
>> drivers/dma/ascend_sdma.c:545:9: error: implicit declaration of function 'dsb' [-Werror=implicit-function-declaration]
     545 |         dsb(sy);
         |         ^~~
>> drivers/dma/ascend_sdma.c:545:13: error: 'sy' undeclared (first use in this function); did you mean 'sc'?
     545 |         dsb(sy);
         |             ^~
         |             sc
   drivers/dma/ascend_sdma.c:545:13: note: each undeclared identifier is reported only once for each function it appears in
   drivers/dma/ascend_sdma.c:540:22: warning: variable 'ch_ctrl_reg' set but not used [-Wunused-but-set-variable]
     540 |         u32 irq_reg, ch_ctrl_reg;
         |                      ^~~~~~~~~~~
   drivers/dma/ascend_sdma.c: In function 'sdma_start_transfer':
>> drivers/dma/ascend_sdma.c:633:9: error: implicit declaration of function 'dmb'; did you mean 'rmb'? [-Werror=implicit-function-declaration]
     633 |         dmb(sy);
         |         ^~~
         |         rmb
   drivers/dma/ascend_sdma.c:633:13: error: 'sy' undeclared (first use in this function); did you mean 's8'?
     633 |         dmb(sy);
         |             ^~
         |             s8
   drivers/dma/ascend_sdma.c: At top level:
   drivers/dma/ascend_sdma.c:638:6: warning: no previous prototype for 'sdma_issue_pending' [-Wmissing-prototypes]
     638 | void sdma_issue_pending(struct dma_chan *chan)
         |      ^~~~~~~~~~~~~~~~~~
   drivers/dma/ascend_sdma.c:651:6: warning: no previous prototype for 'sdma_free_chan_resources' [-Wmissing-prototypes]
     651 | void sdma_free_chan_resources(struct dma_chan *chan)
         |      ^~~~~~~~~~~~~~~~~~~~~~~~
   cc1: some warnings being treated as errors


vim +/dsb +545 drivers/dma/ascend_sdma.c

   527	
 > 528	void sdma_synchronize(struct dma_chan *chan)
   529	{
   530		struct sdma_chan *sc = to_sdma_chan(chan);
   531	
   532		vchan_synchronize(&sc->vc);
   533	}
   534	
 > 535	enum dma_status sdma_tx_status(struct dma_chan *chan,
   536			dma_cookie_t cookie,
   537			struct dma_tx_state *txstate)
   538	{
   539		u32 cq_head, cq_tail, cq_count;
   540		u32 irq_reg, ch_ctrl_reg;
   541		struct sdma_cq_entry *cq_entry;
   542		struct sdma_chan *sc = to_sdma_chan(chan);
   543		enum dma_status ret = DMA_IN_PROGRESS;
   544	
 > 545		dsb(sy);
   546		irq_reg = readl(sc->io_base + SDMAM_IRQ_STATUS_REG);
   547		ch_ctrl_reg = readl(sc->io_base + SDMAM_CH_CTRL_REG);
   548	
   549		if (irq_reg & SDMAM_IRQ_IOC_MASK) {
   550			writel(irq_reg, sc->io_base + SDMAM_IRQ_STATUS_REG);
   551	
   552			cq_head = sc->cq_head;
   553			cq_tail = sdma_channel_get_cq_tail(sc);
   554			cq_count = sdma_queue_count(cq_head, cq_tail, SDMA_CQ_LENGTH);
   555			if (!cq_count) {
   556				pr_err("unexpected complete irq\n");
   557				ret = DMA_ERROR;
   558				goto out;
   559			}
   560	
   561			for (; cq_count; cq_count--) {
   562				cq_entry = sc->cq_base + cq_head;
   563				if (cq_entry->vld != sc->cq_vld || cq_entry->status) {
   564					pr_err("cq_entry invalid, vld: %u, cq_vld: %u, status: %u\n",
   565							cq_entry->vld, sc->cq_vld, cq_entry->status);
   566					ret = DMA_ERROR;
   567				}
   568				if (++cq_head == SDMA_CQ_LENGTH) {
   569					sc->cq_vld ^= 1;
   570					cq_head = 0;
   571				}
   572			}
   573	
   574			sc->cq_head = cq_head;
   575			sdma_channel_set_cq_head(sc, cq_head);
   576			sc->sq_head = sdma_channel_get_sq_head(sc);
   577			sc->cq_tail = cq_tail;
   578	
   579			if (ret != DMA_ERROR) {
   580				ret = DMA_COMPLETE;
   581				vchan_cookie_complete(&sc->desc->vd);
   582			}
   583		} else if (irq_reg & SDMAM_IRQ_IOE_MASK) {
   584			writel(irq_reg, sc->io_base + SDMAM_IRQ_STATUS_REG);
   585			pr_err("sdma ioe interrupt occur, status: %#x\n", irq_reg);
   586			sdma_error_handle(sc);
   587	
   588			ret = DMA_ERROR;
   589		}
   590	
   591	out:
   592		return ret;
   593	}
   594	
   595	static void sdma_start_transfer(struct sdma_chan *pchan)
   596	{
   597		u16 sq_tail = pchan->sq_tail;
   598		struct sdma_sq_entry *entry = pchan->sq_base + sq_tail;
   599		struct sdma_desc *desc;
   600		struct virt_dma_desc *vd;
   601	
   602		vd = vchan_next_desc(&pchan->vc);
   603		if (!vd) {
   604			pchan->desc = NULL;
   605			return;
   606		}
   607		list_del(&vd->node);
   608		desc = to_sdma_desc(vd);
   609		pchan->desc = desc;
   610	
   611		memcpy(entry, &desc->entry, sizeof(struct sdma_sq_entry));
   612	
   613		entry->src_streamid = pchan->sdev->streamid;
   614		entry->dst_streamid = pchan->sdev->streamid;
   615	
   616		entry->sns          = 1;
   617		entry->dns          = 1;
   618		entry->ie           = 0;
   619		entry->partid       = 0;
   620		entry->mpamns       = 1;
   621		if (pchan->pasid) {
   622			entry->sssv            = 1;
   623			entry->dssv            = 1;
   624			entry->src_substreamid = pchan->pasid;
   625			entry->dst_substreamid = pchan->pasid;
   626		} else {
   627			entry->sssv = 0;
   628			entry->dssv = 0;
   629		}
   630		sq_tail = (sq_tail + 1) & (SDMA_SQ_LENGTH - 1);
   631		entry->ie = 1;
   632	
 > 633		dmb(sy);
   634		sdma_channel_set_sq_tail(pchan, sq_tail);
   635		pchan->sq_tail = sq_tail;
   636	}
   637	

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki


* Re: [PATCH 2/2] dt-bindings: dma: hisi: Add bindings for Hisi Ascend sdma
  2023-08-11  3:48 ` [PATCH 2/2] dt-bindings: dma: hisi: Add bindings for Hisi Ascend sdma Guo Mengqi
@ 2023-08-14  8:51   ` Krzysztof Kozlowski
  0 siblings, 0 replies; 8+ messages in thread
From: Krzysztof Kozlowski @ 2023-08-14  8:51 UTC (permalink / raw)
  To: Guo Mengqi, vkoul, dmaengine, robh+dt, krzysztof.kozlowski+dt,
	conor+dt, devicetree
  Cc: xuqiang36

On 11/08/2023 05:48, Guo Mengqi wrote:
> Add device-tree binding documentation for the Hisi Ascend sdma
> controller.
> 
> Signed-off-by: Guo Mengqi <guomengqi3@huawei.com>
> ---
>  .../bindings/dma/hisi,ascend-sdma.yaml        | 82 +++++++++++++++++++
>  1 file changed, 82 insertions(+)
>  create mode 100644 Documentation/devicetree/bindings/dma/hisi,ascend-sdma.yaml
> 
> diff --git a/Documentation/devicetree/bindings/dma/hisi,ascend-sdma.yaml b/Documentation/devicetree/bindings/dma/hisi,ascend-sdma.yaml
> new file mode 100644
> index 000000000000..beb2b3597f4d
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/dma/hisi,ascend-sdma.yaml
> @@ -0,0 +1,82 @@
> +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
> +%YAML 1.2
> +---
> +$id: http://devicetree.org/schemas/dma/hisi,ascend-sdma.yaml#
> +$schema: http://devicetree.org/meta-schemas/core.yaml#
> +
> +title: HISI Ascend System DMA (SDMA) controller
> +
> +description: |
> +  The Ascend SDMA controller is used for transferring data

What is Ascend? SoC? Family? Model?

> +  in system memory. It utilizes IOMMU SVA feature and accepts
> +  virtual address from user process.
> +
> +maintainers:
> +  - Guo Mengqi <guomengqi3@huawei.com>
> +
> +allOf:
> +  - $ref: dma-controller.yaml#
> +
> +properties:
> +  compatible:
> +    const: hisilicon,sdma

Way too generic compatible. It must be *model specific*.

> +
> +  reg:
> +    maxItems: 1
> +
> +  '#dma-cells':
> +    const: 1
> +    description:
> +      Clients specify a single cell with channel number.
> +
> +  ascend_sdma_channel_map:

Missing vendor prefix, no underscores in property names, missing type/$ref.


I doubt this was ever tested.

> +    description: |
> +      bitmap, each bit stands for a channel that is allowed to
> +      use by this system. Maximum 32 bits.
> +    maximum: 0xffffffff
> +
> +  ascend_sdma_channel_iomem_size:
> +    description: |
> +      depends on different platforms to be released. There are

Ah, so compatible is not specific enough? No, please make compatibles
specific and drop this property.

> +      currently two possible values. A default value is used if
> +      the property is not set.
> +      - enum:
> +        - 0x400
> +        - 0x1000
> +
> +  iommus:
> +    maxItems: 1
> +
> +  pasid-num-bits:
> +    description: |
> +      sdma utilizes iommu sva feature to transfer user space data.
> +      It act as a basic dma controller if not bound to user space.
> +    const: 0x10
> +
> +  dma-coherent: true
> +
> +  dma-can-stall: true
> +
> +required:
> +  - compatible
> +  - reg
> +  - ascend_sdma_channel_map
> +  - '#dma-cells'
> +  - iommus
> +
> +additionalProperties: false
> +
> +examples:
> +  - |
> +    sdma: dma-controller@880E0000 {

Use lowercase hex and drop unused label.

> +      compatible = "hisilicon,sdma";
> +        reg = <0x880e0000 0x10000>;

Broken indentation.




Best regards,
Krzysztof



* Re: [PATCH 1/2] dmaengine: Add HiSilicon Ascend SDMA engine support
  2023-08-11  3:48 ` [PATCH 1/2] dmaengine: Add HiSilicon Ascend SDMA engine support Guo Mengqi
  2023-08-11  9:50   ` kernel test robot
  2023-08-11 11:55   ` kernel test robot
@ 2023-08-14  8:54   ` Krzysztof Kozlowski
  2023-08-18 10:20     ` guomengqi (A)
  2 siblings, 1 reply; 8+ messages in thread
From: Krzysztof Kozlowski @ 2023-08-14  8:54 UTC (permalink / raw)
  To: Guo Mengqi, vkoul, dmaengine, robh+dt, krzysztof.kozlowski+dt,
	conor+dt, devicetree
  Cc: xuqiang36

On 11/08/2023 05:48, Guo Mengqi wrote:
> This patch adds a driver for HiSilicon Ascend SDMA engine.
> 
> The DMA controller can do transfers between device and memory
> or memory to memory. Currently, the controller only support
> single copy. Drives can pass a substreamid to the DMA engine,
> which will enable transfers in user-space addresses.

...

> +
> +static int sdma_device_probe(struct platform_device *pdev)
> +{
> +	int ret;
> +	struct device *dev;
> +	struct sdma_dev *sdev;
> +	struct sdma_hardware_info info;
> +
> +	dev = &pdev->dev;
> +
> +	if (!pdev->dev.bus) {
> +		pr_debug("the sdma dev bus is NULL\n");

How is this possible?

> +		return -EPROBE_DEFER;
> +	}
> +
> +	if (!pdev->dev.bus->iommu_ops) {
> +		pr_debug("defer probe sdma device\n");

Anyway, no pr_xxx, but dev_xxx. Is it really, really possible?


> +		return -EPROBE_DEFER;
> +	}
> +
> +	sdev = devm_kzalloc(dev, sizeof(*sdev), GFP_KERNEL);
> +	if (!sdev) {
> +		pr_err("alloc sdma_device failed\n");
> +		return -ENOMEM;
> +	}
> +
> +	sdev->pdev = pdev;
> +	dev_set_drvdata(&pdev->dev, sdev);

Come on, you just stored pdev->dev under dev!
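
e.g. (just a sketch):

	sdev->pdev = pdev;
	platform_set_drvdata(pdev, sdev);	/* or dev_set_drvdata(dev, sdev) */
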
> +
> +	ret = of_sdma_collect_info(pdev, &info);
> +	if (ret < 0) {
> +		pr_err("collect device info failed, %d\n", ret);

dev_err

Please work on this driver to start looking like other kernel drivers.


> +		return ret;
> +	}
> +
> +	sdev->io_base = ioremap(info.base_addr, SDMA_IOMEM_SIZE);
> +	if (!sdev->io_base) {
> +		pr_err("ioremap failed\n");
> +		ret = -ENOMEM;
> +		return ret;
> +	}
> +
> +	/* Fill in dmaengine */
> +	sdma_init_dma_device(&sdev->dma_dev, dev);
> +
> +	ret = sdma_init_channels(sdev, &info);
> +	if (ret < 0)
> +		goto unmap_iobase;
> +
> +	ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
> +	if (ret) {
> +		pr_err("iommu failed to init iopf, %d\n", ret);
> +		goto destroy_channels;
> +	}
> +
> +	ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
> +	if (ret) {
> +		pr_err("iommu failed to init sva, %d\n", ret);
> +		goto disable_iopf;
> +	}
> +
> +	sdev->streamid = pdev->dev.iommu->fwspec->ids[0];
> +
> +	snprintf(sdev->name, SDMA_DEVICE_NAME_LENGTH_MAX, "sdma%d", sdev->idx);
> +	pr_info("%s device probe success\n", sdev->name);

No, drop.

> +
> +	ret = dma_async_device_register(&sdev->dma_dev);
> +	if (ret) {
> +		dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
> +		goto disable_sva;
> +	}
> +
> +	return 0;
> +
> +disable_sva:
> +	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
> +disable_iopf:
> +	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
> +destroy_channels:
> +	sdma_destroy_channels(sdev);
> +unmap_iobase:
> +	iounmap(sdev->io_base);
> +	return ret;
> +}
> +
> +static int sdma_device_remove(struct platform_device *pdev)
> +{
> +	struct sdma_dev *psdma_dev = dev_get_drvdata(&pdev->dev);
> +
> +	dma_async_device_unregister(&psdma_dev->dma_dev);
> +
> +	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
> +	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
> +
> +	sdma_destroy_channels(psdma_dev);
> +
> +	iounmap(psdma_dev->io_base);
> +
> +	return 0;
> +}
> +
> +static const struct of_device_id sdma_of_match[] = {
> +	{ .compatible = "hisilicon,sdma" },
> +	{ }
> +};
> +MODULE_DEVICE_TABLE(of, sdma_of_match);
> +
> +static struct platform_driver sdma_driver = {
> +	.probe    = sdma_device_probe,
> +	.remove   = sdma_device_remove,
> +	.driver   = {
> +		.name           = SDMA_DEVICE_NAME,
> +		.of_match_table = sdma_of_match,
> +	},
> +};
> +
> +static int __init sdma_driver_init(void)
> +{
> +	return platform_driver_register(&sdma_driver);
> +}
> +module_init(sdma_driver_init);
> +
> +static void sdma_driver_exit(void)
> +{
> +	platform_driver_unregister(&sdma_driver);
> +}
> +module_exit(sdma_driver_exit);

module_platform_driver
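
i.e. the init/exit boilerplate above collapses to a single line:

	module_platform_driver(sdma_driver);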


Best regards,
Krzysztof



* Re: [PATCH 1/2] dmaengine: Add HiSilicon Ascend SDMA engine support
  2023-08-14  8:54   ` Krzysztof Kozlowski
@ 2023-08-18 10:20     ` guomengqi (A)
  0 siblings, 0 replies; 8+ messages in thread
From: guomengqi (A) @ 2023-08-18 10:20 UTC (permalink / raw)
  To: Krzysztof Kozlowski, vkoul, dmaengine, robh+dt,
	krzysztof.kozlowski+dt, conor+dt, devicetree
  Cc: xuqiang36

在 2023/8/14 16:54, Krzysztof Kozlowski 写道:
> On 11/08/2023 05:48, Guo Mengqi wrote:
>> This patch adds a driver for HiSilicon Ascend SDMA engine.
>>
>> The DMA controller can do transfers between device and memory
>> or memory to memory. Currently, the controller only support
>> single copy. Drives can pass a substreamid to the DMA engine,
>> which will enable transfers in user-space addresses.
> ...
>
>> +
>> +static int sdma_device_probe(struct platform_device *pdev)
>> +{
>> +	int ret;
>> +	struct device *dev;
>> +	struct sdma_dev *sdev;
>> +	struct sdma_hardware_info info;
>> +
>> +	dev = &pdev->dev;
>> +
>> +	if (!pdev->dev.bus) {
>> +		pr_debug("the sdma dev bus is NULL\n");
> How is this possible?
    This should not be possible. Removed.
>
>> +		return -EPROBE_DEFER;
>> +	}
>> +
>> +	if (!pdev->dev.bus->iommu_ops) {
>> +		pr_debug("defer probe sdma device\n");
> Anyway, no pr_xxx, but dev_xxx. Is it really, really possible?
>
If the IOMMU driver is loaded later than this driver, then iommu_ops
may still be uninitialized here.
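
Just as a sketch, a check along these lines might replace the
bus->iommu_ops peek (assuming device_iommu_mapped() is a sufficient
readiness test here):

	if (!device_iommu_mapped(&pdev->dev))
		return dev_err_probe(&pdev->dev, -EPROBE_DEFER,
				     "IOMMU not attached yet\n");
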
>> +		return -EPROBE_DEFER;
>> +	}
>> +
>> +	sdev = devm_kzalloc(dev, sizeof(*sdev), GFP_KERNEL);
>> +	if (!sdev) {
>> +		pr_err("alloc sdma_device failed\n");
>> +		return -ENOMEM;
>> +	}
>> +
>> +	sdev->pdev = pdev;
>> +	dev_set_drvdata(&pdev->dev, sdev);
> Come on, you just stored pdev->dev under dev!
>> +
>> +	ret = of_sdma_collect_info(pdev, &info);
>> +	if (ret < 0) {
>> +		pr_err("collect device info failed, %d\n", ret);
> dev_err
>
> Please work on this driver to start looking like other kernel drivers.
>
>
>> +		return ret;
>> +	}
>> +
>> +	sdev->io_base = ioremap(info.base_addr, SDMA_IOMEM_SIZE);
>> +	if (!sdev->io_base) {
>> +		pr_err("ioremap failed\n");
>> +		ret = -ENOMEM;
>> +		return ret;
>> +	}
>> +
>> +	/* Fill in dmaengine */
>> +	sdma_init_dma_device(&sdev->dma_dev, dev);
>> +
>> +	ret = sdma_init_channels(sdev, &info);
>> +	if (ret < 0)
>> +		goto unmap_iobase;
>> +
>> +	ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
>> +	if (ret) {
>> +		pr_err("iommu failed to init iopf, %d\n", ret);
>> +		goto destroy_channels;
>> +	}
>> +
>> +	ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
>> +	if (ret) {
>> +		pr_err("iommu failed to init sva, %d\n", ret);
>> +		goto disable_iopf;
>> +	}
>> +
>> +	sdev->streamid = pdev->dev.iommu->fwspec->ids[0];
>> +
>> +	snprintf(sdev->name, SDMA_DEVICE_NAME_LENGTH_MAX, "sdma%d", sdev->idx);
>> +	pr_info("%s device probe success\n", sdev->name);
> No, drop.
>
>> +
>> +	ret = dma_async_device_register(&sdev->dma_dev);
>> +	if (ret) {
>> +		dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
>> +		goto disable_sva;
>> +	}
>> +
>> +	return 0;
>> +
>> +disable_sva:
>> +	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
>> +disable_iopf:
>> +	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
>> +destroy_channels:
>> +	sdma_destroy_channels(sdev);
>> +unmap_iobase:
>> +	iounmap(sdev->io_base);
>> +	return ret;
>> +}
>> +
>> +static int sdma_device_remove(struct platform_device *pdev)
>> +{
>> +	struct sdma_dev *psdma_dev = dev_get_drvdata(&pdev->dev);
>> +
>> +	dma_async_device_unregister(&psdma_dev->dma_dev);
>> +
>> +	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
>> +	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
>> +
>> +	sdma_destroy_channels(psdma_dev);
>> +
>> +	iounmap(psdma_dev->io_base);
>> +
>> +	return 0;
>> +}
>> +
>> +static const struct of_device_id sdma_of_match[] = {
>> +	{ .compatible = "hisilicon,sdma" },
>> +	{ }
>> +};
>> +MODULE_DEVICE_TABLE(of, sdma_of_match);
>> +
>> +static struct platform_driver sdma_driver = {
>> +	.probe    = sdma_device_probe,
>> +	.remove   = sdma_device_remove,
>> +	.driver   = {
>> +		.name           = SDMA_DEVICE_NAME,
>> +		.of_match_table = sdma_of_match,
>> +	},
>> +};
>> +
>> +static int __init sdma_driver_init(void)
>> +{
>> +	return platform_driver_register(&sdma_driver);
>> +}
>> +module_init(sdma_driver_init);
>> +
>> +static void sdma_driver_exit(void)
>> +{
>> +	platform_driver_unregister(&sdma_driver);
>> +}
>> +module_exit(sdma_driver_exit);
> module_platform_driver
>
>
> Best regards,
> Krzysztof
>
> .

Hi

Thank you for carefully reviewing this patch and for all the advice.

I have sent a version 2 of the patch, which fixes these points in the
dt-bindings and the driver code.

If anything else is wrong, please let me know.


Best regards,

Guo Mengqi



end of thread (newest: 2023-08-18 10:20 UTC)

Thread overview: 8+ messages
2023-08-11  3:48 [PATCH 0/2] Add sdma driver for HiSilicon Ascend platform Guo Mengqi
2023-08-11  3:48 ` [PATCH 1/2] dmaengine: Add HiSilicon Ascend SDMA engine support Guo Mengqi
2023-08-11  9:50   ` kernel test robot
2023-08-11 11:55   ` kernel test robot
2023-08-14  8:54   ` Krzysztof Kozlowski
2023-08-18 10:20     ` guomengqi (A)
2023-08-11  3:48 ` [PATCH 2/2] dt-bindings: dma: hisi: Add bindings for Hisi Ascend sdma Guo Mengqi
2023-08-14  8:51   ` Krzysztof Kozlowski
