DMA Engine development
 help / color / mirror / Atom feed
* [PATCH 1/5] dmaengine: lgm-dma: Move platform data to device tree
@ 2025-07-30  2:45 Zhu Yixin
  2025-07-30  2:45 ` [PATCH 2/5] dmaengine: lgm-dma: Correct ORRC MAX counter value Zhu Yixin
                   ` (4 more replies)
  0 siblings, 5 replies; 14+ messages in thread
From: Zhu Yixin @ 2025-07-30  2:45 UTC (permalink / raw)
  To: dmaengine, vkoul; +Cc: jchng, sureshnagaraj, Zhu Yixin

Remove the platform data and unify the settings via the device tree.

Signed-off-by: Zhu Yixin <yzhu@maxlinear.com>
---
 .../devicetree/bindings/dma/intel,ldma.yaml   |  67 ++++-
 drivers/dma/lgm/lgm-dma.c                     | 242 +++++++-----------
 2 files changed, 144 insertions(+), 165 deletions(-)

diff --git a/Documentation/devicetree/bindings/dma/intel,ldma.yaml b/Documentation/devicetree/bindings/dma/intel,ldma.yaml
index d6bb553a2c6f..59f928297613 100644
--- a/Documentation/devicetree/bindings/dma/intel,ldma.yaml
+++ b/Documentation/devicetree/bindings/dma/intel,ldma.yaml
@@ -7,8 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Lightning Mountain centralized DMA controllers.
 
 maintainers:
-  - chuanhua.lei@intel.com
-  - mallikarjunax.reddy@intel.com
+  - yzhu@maxlinear.com
 
 allOf:
   - $ref: dma-controller.yaml#
@@ -16,14 +15,7 @@ allOf:
 properties:
   compatible:
     enum:
-      - intel,lgm-cdma
-      - intel,lgm-dma2tx
-      - intel,lgm-dma1rx
-      - intel,lgm-dma1tx
-      - intel,lgm-dma0tx
-      - intel,lgm-dma3
-      - intel,lgm-toe-dma30
-      - intel,lgm-toe-dma31
+      - intel,lgm-ldma
 
   reg:
     maxItems: 1
@@ -80,16 +72,62 @@ properties:
       if it is disabled, the DMA RX will still support programmable fixed burst size of 2,4,8,16.
       It only applies to RX DMA and memcopy DMA.
 
+  intel,dma-flowctrl:
+    type: boolean
+    description:
+      DMA per-channel flow control.
+
+  intel,dma-fod:
+    type: boolean
+    description:
+      DMA fetch-on-demand.
+      It should only be enabled when DMA connected to a component that can
+      provide fetch-on-demand signal to DMA.
+
+  intel,dma-desc-in-sram:
+    type: boolean
+    description:
+      DMA descriptor in sram.
+      It only affects legacy DMA(V22)
+      DMA version V31 onwards, it is always enabled and setting ignored by
+      DMA HW.
+
+  intel,dma-desc-fack:
+    type: boolean
+    description:
+      DMA descriptor fetch acknowledgement.
+      This feature only takes effects if DMA fetch-on-demand is enabled.
+
+  intel,dma-orrc:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description:
+      DMA outstanding descriptor read.
+      The maximum orrc count is 16.
+
+  intel,dma-type:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description:
+      DMA type. Only valid for DMAV31 onwards.
+      DMA type TX is 0.
+      DMA type RX is 1.
+      DMA type MCPY is 2.
+
+  intel,dma-name:
+    $ref: /schemas/types.yaml#/definitions/string
+    description:
+      Name of the DMA.
+
 required:
   - compatible
   - reg
+  - intel,dma-name
 
 additionalProperties: false
 
 examples:
   - |
     dma0: dma-controller@e0e00000 {
-      compatible = "intel,lgm-cdma";
+      compatible = "intel,lgm-ldma";
       reg = <0xe0e00000 0x1000>;
       #dma-cells = <3>;
       dma-channels = <16>;
@@ -102,10 +140,11 @@ examples:
       intel,dma-poll-cnt = <4>;
       intel,dma-byte-en;
       intel,dma-drb;
+      intel,dma-name = "dma0";
     };
   - |
     dma3: dma-controller@ec800000 {
-      compatible = "intel,lgm-dma3";
+      compatible = "intel,lgm-ldma";
       reg = <0xec800000 0x1000>;
       clocks = <&cgu0 71>;
       resets = <&rcu0 0x10 9>;
@@ -113,4 +152,8 @@ examples:
       intel,dma-poll-cnt = <16>;
       intel,dma-byte-en;
       intel,dma-dburst-wr;
+      intel,dma-type = <2>;
+      intel,dma-desc-in-sram;
+      intel,dma-name = "dma3";
+      intel,dma-orrc = <16>;
     };
diff --git a/drivers/dma/lgm/lgm-dma.c b/drivers/dma/lgm/lgm-dma.c
index 8173c3f1075a..93438cc9f020 100644
--- a/drivers/dma/lgm/lgm-dma.c
+++ b/drivers/dma/lgm/lgm-dma.c
@@ -3,6 +3,7 @@
  * Lightning Mountain centralized DMA controller driver
  *
  * Copyright (c) 2016 - 2020 Intel Corporation.
+ * Copyright (c) 2020 - 2025 Maxlinear Inc.
  */
 
 #include <linux/bitfield.h>
@@ -139,6 +140,7 @@
 #define DMA_VALID_DESC_FETCH_ACK	BIT(7)
 #define DMA_DFT_DRB			BIT(8)
 
+#define DMA_DFT_ORRC_CNT		16
 #define DMA_ORRC_MAX_CNT		(SZ_32 - 1)
 #define DMA_DFT_POLL_CNT		SZ_4
 #define DMA_DFT_BURST_V22		SZ_2
@@ -183,11 +185,17 @@ enum ldma_chan_on_off {
 };
 
 enum {
+	DMA_TYPE_INVD = -1, /* Legacy DMA does not have type */
 	DMA_TYPE_TX = 0,
 	DMA_TYPE_RX,
 	DMA_TYPE_MCPY,
 };
 
+enum {
+	DMA_IN_HW_MODE,
+	DMA_IN_SW_MODE,
+};
+
 struct ldma_dev;
 struct ldma_port;
 
@@ -218,6 +226,7 @@ struct ldma_chan {
 	struct dw2_desc_sw	*ds;
 	struct work_struct	work;
 	struct dma_slave_config config;
+	int			mode;
 };
 
 struct ldma_port {
@@ -230,17 +239,6 @@ struct ldma_port {
 	u32			pkt_drop;
 };
 
-/* Instance specific data */
-struct ldma_inst_data {
-	bool			desc_in_sram;
-	bool			chan_fc;
-	bool			desc_fod; /* Fetch On Demand */
-	bool			valid_desc_fetch_ack;
-	u32			orrc; /* Outstanding read count */
-	const char		*name;
-	u32			type;
-};
-
 struct ldma_dev {
 	struct device		*dev;
 	void __iomem		*base;
@@ -257,7 +255,9 @@ struct ldma_dev {
 	u32			channels_mask;
 	u32			flags;
 	u32			pollcnt;
-	const struct ldma_inst_data *inst;
+	u32			orrc; /* Outstanding read count */
+	int			type;
+	const char		*name;
 	struct workqueue_struct	*wq;
 };
 
@@ -349,7 +349,7 @@ static void ldma_dev_chan_flow_ctl_cfg(struct ldma_dev *d, bool enable)
 	unsigned long flags;
 	u32 mask, val;
 
-	if (d->inst->type != DMA_TYPE_TX)
+	if (d->type != DMA_TYPE_TX)
 		return;
 
 	mask = DMA_CTRL_CH_FL;
@@ -378,7 +378,7 @@ static void ldma_dev_desc_fetch_on_demand_cfg(struct ldma_dev *d, bool enable)
 	unsigned long flags;
 	u32 mask, val;
 
-	if (d->inst->type == DMA_TYPE_MCPY)
+	if (d->type == DMA_TYPE_MCPY)
 		return;
 
 	mask = DMA_CTRL_DS_FOD;
@@ -406,12 +406,12 @@ static void ldma_dev_orrc_cfg(struct ldma_dev *d)
 	u32 val = 0;
 	u32 mask;
 
-	if (d->inst->type == DMA_TYPE_RX)
+	if (d->type == DMA_TYPE_RX)
 		return;
 
 	mask = DMA_ORRC_EN | DMA_ORRC_ORRCNT;
-	if (d->inst->orrc > 0 && d->inst->orrc <= DMA_ORRC_MAX_CNT)
-		val = DMA_ORRC_EN | FIELD_PREP(DMA_ORRC_ORRCNT, d->inst->orrc);
+	if (d->orrc > 0 && d->orrc <= DMA_ORRC_MAX_CNT)
+		val = DMA_ORRC_EN | FIELD_PREP(DMA_ORRC_ORRCNT, d->orrc);
 
 	spin_lock_irqsave(&d->dev_lock, flags);
 	ldma_update_bits(d, mask, val, DMA_ORRC);
@@ -439,7 +439,7 @@ static void ldma_dev_dburst_wr_cfg(struct ldma_dev *d, bool enable)
 	unsigned long flags;
 	u32 mask, val;
 
-	if (d->inst->type != DMA_TYPE_RX && d->inst->type != DMA_TYPE_MCPY)
+	if (d->type != DMA_TYPE_RX && d->type != DMA_TYPE_MCPY)
 		return;
 
 	mask = DMA_CTRL_DBURST_WR;
@@ -455,7 +455,7 @@ static void ldma_dev_vld_fetch_ack_cfg(struct ldma_dev *d, bool enable)
 	unsigned long flags;
 	u32 mask, val;
 
-	if (d->inst->type != DMA_TYPE_TX)
+	if (d->type != DMA_TYPE_TX)
 		return;
 
 	mask = DMA_CTRL_VLD_DF_ACK;
@@ -511,7 +511,7 @@ static int ldma_dev_cfg(struct ldma_dev *d)
 	}
 
 	dev_dbg(d->dev, "%s Controller 0x%08x configuration done\n",
-		d->inst->name, readl(d->base + DMA_CTRL));
+		d->name, readl(d->base + DMA_CTRL));
 
 	return 0;
 }
@@ -578,7 +578,7 @@ static void ldma_chan_set_class(struct ldma_chan *c, u32 val)
 	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
 	u32 class_val;
 
-	if (d->inst->type == DMA_TYPE_MCPY || val > DMA_MAX_CLASS)
+	if (d->type == DMA_TYPE_MCPY || val > DMA_MAX_CLASS)
 		return;
 
 	/* 3 bits low */
@@ -929,26 +929,41 @@ static int ldma_parse_dt(struct ldma_dev *d)
 	if (fwnode_property_read_bool(fwnode, "intel,dma-drb"))
 		d->flags |= DMA_DFT_DRB;
 
-	if (fwnode_property_read_u32(fwnode, "intel,dma-poll-cnt",
-				     &d->pollcnt))
-		d->pollcnt = DMA_DFT_POLL_CNT;
-
-	if (d->inst->chan_fc)
+	if (fwnode_property_read_bool(fwnode, "intel,dma-flowctrl"))
 		d->flags |= DMA_CHAN_FLOW_CTL;
 
-	if (d->inst->desc_fod)
+	if (fwnode_property_read_bool(fwnode, "intel,dma-fod"))
 		d->flags |= DMA_DESC_FOD;
 
-	if (d->inst->desc_in_sram)
+	if (fwnode_property_read_bool(fwnode, "intel,dma-desc-in-sram"))
 		d->flags |= DMA_DESC_IN_SRAM;
 
-	if (d->inst->valid_desc_fetch_ack)
+	if (fwnode_property_read_bool(fwnode, "intel,dma-desc-fack"))
 		d->flags |= DMA_VALID_DESC_FETCH_ACK;
 
-	if (d->ver > DMA_VER22) {
-		if (!d->port_nrs)
-			return -EINVAL;
+	if (fwnode_property_read_u32(fwnode, "intel,dma-poll-cnt",
+				     &d->pollcnt))
+		d->pollcnt = DMA_DFT_POLL_CNT;
+
+	if (fwnode_property_read_u32(fwnode, "intel,dma-orrc",
+				     &d->orrc))
+		d->orrc = DMA_DFT_ORRC_CNT;
+
+	if (fwnode_property_read_u32(fwnode, "intel,dma-type",
+				     &d->type))
+		d->type = DMA_TYPE_INVD;
 
+	if (fwnode_property_read_u32(fwnode, "dma-channel-mask",
+				     &d->channels_mask))
+		d->channels_mask = GENMASK(d->chan_nrs - 1, 0);
+
+	if (fwnode_property_read_string(fwnode, "intel,dma-name",
+					&d->name)) {
+		dev_err(d->dev, "DMA name not available!\n");
+		return -EINVAL;
+	}
+
+	if (d->ver > DMA_VER22) {
 		for (i = 0; i < d->port_nrs; i++) {
 			p = &d->ports[i];
 			p->rxendi = DMA_DFT_ENDIAN;
@@ -1471,93 +1486,48 @@ static void ldma_clk_disable(void *data)
 	reset_control_assert(d->rst);
 }
 
-static const struct ldma_inst_data dma0 = {
-	.name = "dma0",
-	.chan_fc = false,
-	.desc_fod = false,
-	.desc_in_sram = false,
-	.valid_desc_fetch_ack = false,
-};
-
-static const struct ldma_inst_data dma2tx = {
-	.name = "dma2tx",
-	.type = DMA_TYPE_TX,
-	.orrc = 16,
-	.chan_fc = true,
-	.desc_fod = true,
-	.desc_in_sram = true,
-	.valid_desc_fetch_ack = true,
-};
+static int intel_ldma_port_channel_init(struct ldma_dev *d)
+{
+	struct ldma_chan *c;
+	struct ldma_port *p;
+	unsigned long ch_mask;
+	int i,j;
 
-static const struct ldma_inst_data dma1rx = {
-	.name = "dma1rx",
-	.type = DMA_TYPE_RX,
-	.orrc = 16,
-	.chan_fc = false,
-	.desc_fod = true,
-	.desc_in_sram = true,
-	.valid_desc_fetch_ack = false,
-};
+	/* Port Initializations */
+	d->ports = devm_kcalloc(d->dev, d->port_nrs, sizeof(*p), GFP_KERNEL);
+	if (!d->ports)
+		return -ENOMEM;
 
-static const struct ldma_inst_data dma1tx = {
-	.name = "dma1tx",
-	.type = DMA_TYPE_TX,
-	.orrc = 16,
-	.chan_fc = true,
-	.desc_fod = true,
-	.desc_in_sram = true,
-	.valid_desc_fetch_ack = true,
-};
+	/* Channels Initializations */
+	d->chans = devm_kcalloc(d->dev, d->chan_nrs, sizeof(*c), GFP_KERNEL);
+	if (!d->chans)
+		return -ENOMEM;
 
-static const struct ldma_inst_data dma0tx = {
-	.name = "dma0tx",
-	.type = DMA_TYPE_TX,
-	.orrc = 16,
-	.chan_fc = true,
-	.desc_fod = true,
-	.desc_in_sram = true,
-	.valid_desc_fetch_ack = true,
-};
+	for (i = 0; i < d->port_nrs; i++) {
+		p = &d->ports[i];
+		p->portid = i;
+		p->ldev = d;
 
-static const struct ldma_inst_data dma3 = {
-	.name = "dma3",
-	.type = DMA_TYPE_MCPY,
-	.orrc = 16,
-	.chan_fc = false,
-	.desc_fod = false,
-	.desc_in_sram = true,
-	.valid_desc_fetch_ack = false,
-};
+		p->rxendi = DMA_DFT_ENDIAN;
+		p->txendi = DMA_DFT_ENDIAN;
+		p->rxbl = DMA_DFT_BURST;
+		p->txbl = DMA_DFT_BURST;
+		p->pkt_drop = DMA_PKT_DROP_DIS;
+	}
 
-static const struct ldma_inst_data toe_dma30 = {
-	.name = "toe_dma30",
-	.type = DMA_TYPE_MCPY,
-	.orrc = 16,
-	.chan_fc = false,
-	.desc_fod = false,
-	.desc_in_sram = true,
-	.valid_desc_fetch_ack = true,
-};
+	ch_mask = (unsigned long)d->channels_mask;
+	for_each_set_bit(j, &ch_mask, d->chan_nrs) {
+		if (d->ver == DMA_VER22)
+			ldma_dma_init_v22(j, d);
+		else
+			ldma_dma_init_v3X(j, d);
+	}
 
-static const struct ldma_inst_data toe_dma31 = {
-	.name = "toe_dma31",
-	.type = DMA_TYPE_MCPY,
-	.orrc = 16,
-	.chan_fc = false,
-	.desc_fod = false,
-	.desc_in_sram = true,
-	.valid_desc_fetch_ack = true,
-};
+	return 0;
+}
 
 static const struct of_device_id intel_ldma_match[] = {
-	{ .compatible = "intel,lgm-cdma", .data = &dma0},
-	{ .compatible = "intel,lgm-dma2tx", .data = &dma2tx},
-	{ .compatible = "intel,lgm-dma1rx", .data = &dma1rx},
-	{ .compatible = "intel,lgm-dma1tx", .data = &dma1tx},
-	{ .compatible = "intel,lgm-dma0tx", .data = &dma0tx},
-	{ .compatible = "intel,lgm-dma3", .data = &dma3},
-	{ .compatible = "intel,lgm-toe-dma30", .data = &toe_dma30},
-	{ .compatible = "intel,lgm-toe-dma31", .data = &toe_dma31},
+	{ .compatible = "intel,lgm-ldma" },
 	{}
 };
 
@@ -1565,12 +1535,9 @@ static int intel_ldma_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct dma_device *dma_dev;
-	unsigned long ch_mask;
-	struct ldma_chan *c;
-	struct ldma_port *p;
 	struct ldma_dev *d;
-	u32 id, bitn = 32, j;
-	int i, ret;
+	u32 id, bitn = 32;
+	int ret;
 
 	d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
 	if (!d)
@@ -1579,12 +1546,6 @@ static int intel_ldma_probe(struct platform_device *pdev)
 	/* Link controller to platform device */
 	d->dev = &pdev->dev;
 
-	d->inst = device_get_match_data(dev);
-	if (!d->inst) {
-		dev_err(dev, "No device match found\n");
-		return -ENODEV;
-	}
-
 	d->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(d->base))
 		return PTR_ERR(d->base);
@@ -1627,17 +1588,18 @@ static int intel_ldma_probe(struct platform_device *pdev)
 		return ret;
 	}
 
+	ret = ldma_parse_dt(d);
+	if (ret)
+		return ret;
+
 	if (d->ver == DMA_VER22) {
 		ret = ldma_init_v22(d, pdev);
 		if (ret)
 			return ret;
 	}
 
-	ret = device_property_read_u32(dev, "dma-channel-mask", &d->channels_mask);
-	if (ret < 0)
-		d->channels_mask = GENMASK(d->chan_nrs - 1, 0);
-
 	dma_dev = &d->dma_dev;
+	dma_dev->dev = &pdev->dev;
 
 	dma_cap_zero(dma_dev->cap_mask);
 	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
@@ -1645,33 +1607,7 @@ static int intel_ldma_probe(struct platform_device *pdev)
 	/* Channel initializations */
 	INIT_LIST_HEAD(&dma_dev->channels);
 
-	/* Port Initializations */
-	d->ports = devm_kcalloc(dev, d->port_nrs, sizeof(*p), GFP_KERNEL);
-	if (!d->ports)
-		return -ENOMEM;
-
-	/* Channels Initializations */
-	d->chans = devm_kcalloc(d->dev, d->chan_nrs, sizeof(*c), GFP_KERNEL);
-	if (!d->chans)
-		return -ENOMEM;
-
-	for (i = 0; i < d->port_nrs; i++) {
-		p = &d->ports[i];
-		p->portid = i;
-		p->ldev = d;
-	}
-
-	dma_dev->dev = &pdev->dev;
-
-	ch_mask = (unsigned long)d->channels_mask;
-	for_each_set_bit(j, &ch_mask, d->chan_nrs) {
-		if (d->ver == DMA_VER22)
-			ldma_dma_init_v22(j, d);
-		else
-			ldma_dma_init_v3X(j, d);
-	}
-
-	ret = ldma_parse_dt(d);
+	ret = intel_ldma_port_channel_init(d);
 	if (ret)
 		return ret;
 
-- 
2.43.5


^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH 2/5] dmaengine: lgm-dma: Correct ORRC MAX counter value.
  2025-07-30  2:45 [PATCH 1/5] dmaengine: lgm-dma: Move platform data to device tree Zhu Yixin
@ 2025-07-30  2:45 ` Zhu Yixin
  2025-07-30  2:45 ` [PATCH 3/5] dmaengine: lgm-dma: split legacy DMA and HDMA functions Zhu Yixin
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 14+ messages in thread
From: Zhu Yixin @ 2025-07-30  2:45 UTC (permalink / raw)
  To: dmaengine, vkoul; +Cc: jchng, sureshnagaraj, Zhu Yixin

The maximum ORRC counter is 16.
Sanity-check and rectify the value when it is read from the device tree.

Signed-off-by: Zhu Yixin <yzhu@maxlinear.com>
---
 drivers/dma/lgm/lgm-dma.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/drivers/dma/lgm/lgm-dma.c b/drivers/dma/lgm/lgm-dma.c
index 93438cc9f020..ea185c5de1b2 100644
--- a/drivers/dma/lgm/lgm-dma.c
+++ b/drivers/dma/lgm/lgm-dma.c
@@ -140,8 +140,7 @@
 #define DMA_VALID_DESC_FETCH_ACK	BIT(7)
 #define DMA_DFT_DRB			BIT(8)
 
-#define DMA_DFT_ORRC_CNT		16
-#define DMA_ORRC_MAX_CNT		(SZ_32 - 1)
+#define DMA_ORRC_MAX_CNT		16
 #define DMA_DFT_POLL_CNT		SZ_4
 #define DMA_DFT_BURST_V22		SZ_2
 #define DMA_BURSTL_8DW			SZ_8
@@ -406,12 +405,11 @@ static void ldma_dev_orrc_cfg(struct ldma_dev *d)
 	u32 val = 0;
 	u32 mask;
 
-	if (d->type == DMA_TYPE_RX)
+	if (d->type == DMA_TYPE_RX || !d->orrc)
 		return;
 
 	mask = DMA_ORRC_EN | DMA_ORRC_ORRCNT;
-	if (d->orrc > 0 && d->orrc <= DMA_ORRC_MAX_CNT)
-		val = DMA_ORRC_EN | FIELD_PREP(DMA_ORRC_ORRCNT, d->orrc);
+	val = DMA_ORRC_EN | FIELD_PREP(DMA_ORRC_ORRCNT, d->orrc);
 
 	spin_lock_irqsave(&d->dev_lock, flags);
 	ldma_update_bits(d, mask, val, DMA_ORRC);
@@ -946,8 +944,11 @@ static int ldma_parse_dt(struct ldma_dev *d)
 		d->pollcnt = DMA_DFT_POLL_CNT;
 
 	if (fwnode_property_read_u32(fwnode, "intel,dma-orrc",
-				     &d->orrc))
-		d->orrc = DMA_DFT_ORRC_CNT;
+				     &d->orrc)) {
+		d->orrc = 0;
+	} else if (d->orrc > DMA_ORRC_MAX_CNT) {
+		d->orrc = DMA_ORRC_MAX_CNT;
+	}
 
 	if (fwnode_property_read_u32(fwnode, "intel,dma-type",
 				     &d->type))
-- 
2.43.5


^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH 3/5] dmaengine: lgm-dma: split legacy DMA and HDMA functions
  2025-07-30  2:45 [PATCH 1/5] dmaengine: lgm-dma: Move platform data to device tree Zhu Yixin
  2025-07-30  2:45 ` [PATCH 2/5] dmaengine: lgm-dma: Correct ORRC MAX counter value Zhu Yixin
@ 2025-07-30  2:45 ` Zhu Yixin
  2025-07-30  6:20   ` Krzysztof Kozlowski
  2025-07-30  2:45 ` [PATCH 4/5] dmaengine: lgm-dma: Added HDMA software mode TX function Zhu Yixin
                   ` (2 subsequent siblings)
  4 siblings, 1 reply; 14+ messages in thread
From: Zhu Yixin @ 2025-07-30  2:45 UTC (permalink / raw)
  To: dmaengine, vkoul; +Cc: jchng, sureshnagaraj, Zhu Yixin

Move the legacy DMA functions into lgm-cdma.c.
Move the HDMA functions into lgm-hdma.c.
Keep the driver flow and the common functions in lgm-dma.c.

Signed-off-by: Zhu Yixin <yzhu@maxlinear.com>
---
 .../devicetree/bindings/dma/intel,ldma.yaml   |   7 +-
 drivers/dma/lgm/Makefile                      |   2 +-
 drivers/dma/lgm/lgm-cdma.c                    | 492 ++++++++++
 drivers/dma/lgm/lgm-dma.c                     | 839 ++----------------
 drivers/dma/lgm/lgm-dma.h                     | 278 ++++++
 drivers/dma/lgm/lgm-hdma.c                    | 130 +++
 6 files changed, 961 insertions(+), 787 deletions(-)
 create mode 100644 drivers/dma/lgm/lgm-cdma.c
 create mode 100644 drivers/dma/lgm/lgm-dma.h
 create mode 100644 drivers/dma/lgm/lgm-hdma.c

diff --git a/Documentation/devicetree/bindings/dma/intel,ldma.yaml b/Documentation/devicetree/bindings/dma/intel,ldma.yaml
index 59f928297613..f91d849edc4c 100644
--- a/Documentation/devicetree/bindings/dma/intel,ldma.yaml
+++ b/Documentation/devicetree/bindings/dma/intel,ldma.yaml
@@ -15,7 +15,8 @@ allOf:
 properties:
   compatible:
     enum:
-      - intel,lgm-ldma
+      - intel,lgm-cdma
+      - intel,lgm-hdma
 
   reg:
     maxItems: 1
@@ -127,7 +128,7 @@ additionalProperties: false
 examples:
   - |
     dma0: dma-controller@e0e00000 {
-      compatible = "intel,lgm-ldma";
+      compatible = "intel,lgm-cdma";
       reg = <0xe0e00000 0x1000>;
       #dma-cells = <3>;
       dma-channels = <16>;
@@ -144,7 +145,7 @@ examples:
     };
   - |
     dma3: dma-controller@ec800000 {
-      compatible = "intel,lgm-ldma";
+      compatible = "intel,lgm-hdma";
       reg = <0xec800000 0x1000>;
       clocks = <&cgu0 71>;
       resets = <&rcu0 0x10 9>;
diff --git a/drivers/dma/lgm/Makefile b/drivers/dma/lgm/Makefile
index f318a8eff464..1246082253ff 100644
--- a/drivers/dma/lgm/Makefile
+++ b/drivers/dma/lgm/Makefile
@@ -1,2 +1,2 @@
 # SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_INTEL_LDMA)	+= lgm-dma.o
+obj-$(CONFIG_INTEL_LDMA)	+= lgm-dma.o lgm-cdma.o lgm-hdma.o
diff --git a/drivers/dma/lgm/lgm-cdma.c b/drivers/dma/lgm/lgm-cdma.c
new file mode 100644
index 000000000000..0acb30706c42
--- /dev/null
+++ b/drivers/dma/lgm/lgm-cdma.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lightning Mountain centralized DMA controller driver
+ *
+ * Copyright (c) 2025 Maxlinear Inc.
+ */
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/of_dma.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+#include "lgm-dma.h"
+
+struct dw2_desc {
+	u32 field;
+	u32 addr;
+} __packed __aligned(8);
+
+struct dw2_desc_sw {
+	struct virt_dma_desc	vdesc;
+	struct ldma_chan	*chan;
+	dma_addr_t		desc_phys;
+	size_t			desc_cnt;
+	size_t			size;
+	struct dw2_desc		*desc_hw;
+};
+
+struct cdma_chan {
+	struct dma_pool		*desc_pool; /* Descriptors pool */
+	u32			desc_cnt; /* Descriptor length */
+	struct dw2_desc_sw	*ds;
+	struct work_struct	work;
+	struct dma_slave_config config;
+};
+
+struct cdma_dev {
+	struct ldma_dev		*ldev;
+	struct workqueue_struct	*wq;
+};
+
+static int cdma_ctrl_init(struct ldma_dev *d);
+static int cdma_port_init(struct ldma_dev *d, struct ldma_port *p);
+static int cdma_chan_init(struct ldma_dev *d, struct ldma_chan *c);
+static int cdma_irq_init(struct ldma_dev *d, struct platform_device *pdev);
+static void cdma_func_init(struct dma_device *dma_dev);
+static irqreturn_t cdma_interrupt(int irq, void *dev_id);
+
+static struct cdma_dev *g_cdma_dev;
+
+struct ldma_ops cdma_ops = {
+	.dma_ctrl_init = cdma_ctrl_init,
+	.dma_port_init = cdma_port_init,
+	.dma_chan_init = cdma_chan_init,
+	.dma_irq_init  = cdma_irq_init,
+	.dma_func_init = cdma_func_init,
+};
+
+static inline struct dw2_desc_sw *to_lgm_cdma_desc(struct virt_dma_desc *vdesc)
+{
+	return container_of(vdesc, struct dw2_desc_sw, vdesc);
+}
+
+static void cdma_free_desc_resource(struct virt_dma_desc *vdesc)
+{
+	struct dw2_desc_sw *ds = to_lgm_cdma_desc(vdesc);
+	struct ldma_chan *c = ds->chan;
+	struct cdma_chan *chan = c->priv;
+
+	dma_pool_free(chan->desc_pool, ds->desc_hw, ds->desc_phys);
+	kfree(ds);
+	chan->ds = NULL;
+}
+
+static void cdma_work(struct work_struct *work)
+{
+	struct ldma_chan *c;
+	struct cdma_chan *chan;
+	struct dma_async_tx_descriptor *tx;
+	struct virt_dma_chan *vc;
+	struct dmaengine_desc_callback cb;
+	struct virt_dma_desc *vd, *_vd;
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	chan = container_of(work, struct cdma_chan, work);
+	if (!chan->ds)
+		return;
+	c = chan->ds[0].chan;
+	tx = &chan->ds->vdesc.tx;
+	vc = &c->vchan;
+
+	spin_lock_irqsave(&c->vchan.lock, flags);
+	list_splice_tail_init(&vc->desc_completed, &head);
+	spin_unlock_irqrestore(&c->vchan.lock, flags);
+	dmaengine_desc_get_callback(tx, &cb);
+	dma_cookie_complete(tx);
+	dmaengine_desc_callback_invoke(&cb, NULL);
+
+	list_for_each_entry_safe(vd, _vd, &head, node) {
+		dmaengine_desc_get_callback(tx, &cb);
+		dma_cookie_complete(tx);
+		list_del(&vd->node);
+		dmaengine_desc_callback_invoke(&cb, NULL);
+
+		vchan_vdesc_fini(vd);
+	}
+}
+
+static int cdma_ctrl_init(struct ldma_dev *d)
+{
+	struct cdma_dev *cdma;
+
+	cdma = devm_kzalloc(d->dev, sizeof(*cdma), GFP_KERNEL);
+	if (!cdma)
+		return -ENOMEM;
+
+	cdma->ldev = d;
+	cdma->wq = alloc_ordered_workqueue("dma_wq",
+					   WQ_MEM_RECLAIM | WQ_HIGHPRI);
+	if (!cdma->wq)
+		return -ENOMEM;
+
+	g_cdma_dev = cdma;
+
+	return 0;
+}
+
+static int cdma_port_init(struct ldma_dev *d, struct ldma_port *p)
+{
+	return 0;
+}
+
+static int cdma_chan_init(struct ldma_dev *d, struct ldma_chan *c)
+{
+	struct cdma_chan *chan;
+
+	c->rst = DMA_CHAN_RST;
+	c->desc_cnt = DMA_DFT_DESC_NUM;
+	snprintf(c->name, sizeof(c->name), "chan%d", c->nr);
+	c->vchan.desc_free = cdma_free_desc_resource;
+	vchan_init(&c->vchan, &d->dma_dev);
+
+	chan = devm_kzalloc(d->dev, sizeof(*chan), GFP_KERNEL);
+	if (!chan)
+		return -ENOMEM;
+
+	INIT_WORK(&chan->work, cdma_work);
+	c->priv = chan;
+
+	return 0;
+}
+
+static int cdma_irq_init(struct ldma_dev *d, struct platform_device *pdev)
+{
+	d->irq = platform_get_irq(pdev, 0);
+	if (d->irq < 0)
+		return d->irq;
+
+	return devm_request_irq(d->dev, d->irq, cdma_interrupt, 0,
+				DRIVER_NAME, d);
+}
+
+static void cdma_chan_irq(int irq, void *data)
+{
+	struct ldma_chan *c = data;
+	struct ldma_dev *d = g_cdma_dev->ldev;
+	struct cdma_chan *chan;
+	u32 stat;
+
+	/* Disable channel interrupts  */
+	writel(c->nr, d->base + DMA_CS);
+	stat = readl(d->base + DMA_CIS);
+	if (!stat)
+		return;
+
+	writel(readl(d->base + DMA_CIE) & ~DMA_CI_ALL, d->base + DMA_CIE);
+	writel(stat, d->base + DMA_CIS);
+	chan = (struct cdma_chan *)c->priv;
+	queue_work(g_cdma_dev->wq, &chan->work);
+}
+
+static irqreturn_t cdma_interrupt(int irq, void *dev_id)
+{
+	struct ldma_dev *d = dev_id;
+	struct ldma_chan *c;
+	unsigned long irncr;
+	u32 cid;
+
+	irncr = readl(d->base + DMA_IRNCR);
+	if (!irncr) {
+		dev_err(d->dev, "dummy interrupt\n");
+		return IRQ_NONE;
+	}
+
+	for_each_set_bit(cid, &irncr, d->chan_nrs) {
+		/* Mask */
+		writel(readl(d->base + DMA_IRNEN) & ~BIT(cid), d->base + DMA_IRNEN);
+		/* Ack */
+		writel(readl(d->base + DMA_IRNCR) | BIT(cid), d->base + DMA_IRNCR);
+
+		c = &d->chans[cid];
+		cdma_chan_irq(irq, c);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int cdma_alloc_chan_resources(struct dma_chan *dma_chan)
+{
+	struct ldma_chan *c = to_ldma_chan(dma_chan);
+	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	struct cdma_chan *chan = (struct cdma_chan *)c->priv;
+	struct device *dev = d->dev;
+	size_t desc_sz;
+
+	if (chan->desc_pool)
+		return c->desc_cnt;
+
+	desc_sz = c->desc_cnt * sizeof(struct dw2_desc);
+	chan->desc_pool = dma_pool_create(c->name, dev, desc_sz,
+					  __alignof__(struct dw2_desc), 0);
+
+	if (!chan->desc_pool) {
+		dev_err(dev, "unable to allocate descriptor pool\n");
+		return -ENOMEM;
+	}
+	chan->desc_cnt = c->desc_cnt;
+
+	return c->desc_cnt;
+}
+
+static void cdma_free_chan_resources(struct dma_chan *dma_chan)
+{
+	struct ldma_chan *c = to_ldma_chan(dma_chan);
+	struct cdma_chan *chan = (struct cdma_chan *)c->priv;
+
+	dma_pool_destroy(chan->desc_pool);
+	chan->desc_pool = NULL;
+	vchan_free_chan_resources(to_virt_chan(dma_chan));
+	ldma_chan_reset(c);
+}
+
+static void cdma_synchronize(struct dma_chan *dma_chan)
+{
+	struct ldma_chan *c = to_ldma_chan(dma_chan);
+	struct cdma_chan *chan = (struct cdma_chan *)c->priv;
+
+	/*
+	 * clear any pending work if any. In that
+	 * case the resource needs to be free here.
+	 */
+	cancel_work_sync(&chan->work);
+	vchan_synchronize(&c->vchan);
+	if (chan->ds)
+		cdma_free_desc_resource(&chan->ds->vdesc);
+}
+
+static int
+cdma_slave_config(struct dma_chan *dma_chan, struct dma_slave_config *cfg)
+{
+	struct ldma_chan *c = to_ldma_chan(dma_chan);
+	struct cdma_chan *chan = (struct cdma_chan *)c->priv;
+
+	memcpy(&chan->config, cfg, sizeof(chan->config));
+
+	return 0;
+}
+
+static void cdma_chan_irq_en(struct ldma_chan *c)
+{
+	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	unsigned long flags;
+
+	spin_lock_irqsave(&d->dev_lock, flags);
+	writel(c->nr, d->base + DMA_CS);
+	writel(DMA_CI_EOP, d->base + DMA_CIE);
+	writel(BIT(c->nr), d->base + DMA_IRNEN);
+	spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void cdma_issue_pending(struct dma_chan *dma_chan)
+{
+	struct ldma_chan *c = to_ldma_chan(dma_chan);
+	struct cdma_chan *chan = (struct cdma_chan *)c->priv;
+	//struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	unsigned long flags;
+
+	spin_lock_irqsave(&c->vchan.lock, flags);
+	if (vchan_issue_pending(&c->vchan)) {
+		struct virt_dma_desc *vdesc;
+
+		/* Get the next descriptor */
+		vdesc = vchan_next_desc(&c->vchan);
+		if (!vdesc) {
+			chan->ds = NULL;
+			spin_unlock_irqrestore(&c->vchan.lock, flags);
+			return;
+		}
+		list_del(&vdesc->node);
+		chan->ds = to_lgm_cdma_desc(vdesc);
+		ldma_chan_desc_hw_cfg(c, chan->ds->desc_phys,
+				      chan->ds->desc_cnt);
+		cdma_chan_irq_en(c);
+	}
+	spin_unlock_irqrestore(&c->vchan.lock, flags);
+
+	ldma_chan_on(c);
+}
+
+static enum dma_status
+cdma_tx_status(struct dma_chan *dma_chan, dma_cookie_t cookie,
+	       struct dma_tx_state *txstate)
+{
+	enum dma_status status = DMA_COMPLETE;
+
+	status = dma_cookie_status(dma_chan, cookie, txstate);
+
+	return status;
+}
+
+static struct dw2_desc_sw *
+cdma_alloc_desc_resource(int num, struct ldma_chan *c)
+{
+	struct device *dev = g_cdma_dev->ldev->dev;
+	struct cdma_chan *chan = (struct cdma_chan *)c->priv;
+	struct dw2_desc_sw *ds;
+
+	if (num > c->desc_cnt) {
+		dev_err(dev, "sg num %d exceed max %d\n", num, c->desc_cnt);
+		return NULL;
+	}
+
+	ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
+	if (!ds)
+		return NULL;
+
+	ds->chan = c;
+	ds->desc_hw = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC,
+				      &ds->desc_phys);
+	if (!ds->desc_hw) {
+		dev_dbg(dev, "out of memory for link descriptor\n");
+		kfree(ds);
+		return NULL;
+	}
+	ds->desc_cnt = num;
+
+	return ds;
+}
+
+static void prep_slave_burst_len(struct ldma_chan *c)
+{
+	struct ldma_port *p = c->port;
+	struct cdma_chan *chan = (struct cdma_chan *)c->priv;
+	struct dma_slave_config *cfg = &chan->config;
+
+	if (cfg->dst_maxburst)
+		cfg->src_maxburst = cfg->dst_maxburst;
+
+	/* TX and RX has the same burst length */
+	p->txbl = ilog2(cfg->src_maxburst);
+	p->rxbl = p->txbl;
+}
+
+static struct dma_async_tx_descriptor *
+cdma_prep_slave_sg(struct dma_chan *dma_chan, struct scatterlist *sgl,
+		   unsigned int sglen, enum dma_transfer_direction dir,
+		   unsigned long flags, void *context)
+{
+	struct ldma_chan *c = to_ldma_chan(dma_chan);
+	struct cdma_chan *chan = (struct cdma_chan *)c->priv;
+	size_t len, avail, total = 0;
+	struct dw2_desc *hw_ds;
+	struct dw2_desc_sw *ds;
+	struct scatterlist *sg;
+	int num = sglen, i;
+	dma_addr_t addr;
+
+	if (!sgl)
+		return NULL;
+
+	for_each_sg(sgl, sg, sglen, i) {
+		avail = sg_dma_len(sg);
+		if (avail > DMA_MAX_SIZE)
+			num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
+	}
+
+	ds = cdma_alloc_desc_resource(num, c);
+	if (!ds)
+		return NULL;
+
+	chan->ds = ds;
+
+	num = 0;
+	/* sop and eop has to be handled nicely */
+	for_each_sg(sgl, sg, sglen, i) {
+		addr = sg_dma_address(sg);
+		avail = sg_dma_len(sg);
+		total += avail;
+
+		do {
+			len = min_t(size_t, avail, DMA_MAX_SIZE);
+
+			hw_ds = &ds->desc_hw[num];
+			switch (sglen) {
+			case 1:
+				hw_ds->field &= ~DESC_SOP;
+				hw_ds->field |= FIELD_PREP(DESC_SOP, 1);
+
+				hw_ds->field &= ~DESC_EOP;
+				hw_ds->field |= FIELD_PREP(DESC_EOP, 1);
+				break;
+			default:
+				if (num == 0) {
+					hw_ds->field &= ~DESC_SOP;
+					hw_ds->field |= FIELD_PREP(DESC_SOP, 1);
+
+					hw_ds->field &= ~DESC_EOP;
+					hw_ds->field |= FIELD_PREP(DESC_EOP, 0);
+				} else if (num == (sglen - 1)) {
+					hw_ds->field &= ~DESC_SOP;
+					hw_ds->field |= FIELD_PREP(DESC_SOP, 0);
+					hw_ds->field &= ~DESC_EOP;
+					hw_ds->field |= FIELD_PREP(DESC_EOP, 1);
+				} else {
+					hw_ds->field &= ~DESC_SOP;
+					hw_ds->field |= FIELD_PREP(DESC_SOP, 0);
+
+					hw_ds->field &= ~DESC_EOP;
+					hw_ds->field |= FIELD_PREP(DESC_EOP, 0);
+				}
+				break;
+			}
+			/* Only 32 bit address supported */
+			hw_ds->addr = (u32)addr;
+
+			hw_ds->field &= ~DESC_DATA_LEN;
+			hw_ds->field |= FIELD_PREP(DESC_DATA_LEN, len);
+
+			hw_ds->field &= ~DESC_C;
+			hw_ds->field |= FIELD_PREP(DESC_C, 0);
+
+			hw_ds->field &= ~DESC_BYTE_OFF;
+			hw_ds->field |= FIELD_PREP(DESC_BYTE_OFF, addr & 0x3);
+
+			/* Ensure data ready before ownership change */
+			wmb();
+			hw_ds->field &= ~DESC_OWN;
+			hw_ds->field |= FIELD_PREP(DESC_OWN, DMA_OWN);
+
+			/* Ensure ownership changed before moving forward */
+			wmb();
+			num++;
+			addr += len;
+			avail -= len;
+		} while (avail);
+	}
+
+	ds->size = total;
+	prep_slave_burst_len(c);
+
+	return vchan_tx_prep(&c->vchan, &ds->vdesc, DMA_CTRL_ACK);
+}
+
+static void cdma_func_init(struct dma_device *dma_dev)
+{
+	dma_dev->device_alloc_chan_resources = cdma_alloc_chan_resources;
+	dma_dev->device_free_chan_resources = cdma_free_chan_resources;
+	dma_dev->device_terminate_all = ldma_terminate_all;
+	dma_dev->device_issue_pending = cdma_issue_pending;
+	dma_dev->device_tx_status = cdma_tx_status;
+	dma_dev->device_resume = ldma_resume_chan;
+	dma_dev->device_pause = ldma_pause_chan;
+	dma_dev->device_prep_slave_sg = cdma_prep_slave_sg;
+
+	dma_dev->device_config = cdma_slave_config;
+	dma_dev->device_synchronize = cdma_synchronize;
+	dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+}
diff --git a/drivers/dma/lgm/lgm-dma.c b/drivers/dma/lgm/lgm-dma.c
index ea185c5de1b2..6fade7b4f820 100644
--- a/drivers/dma/lgm/lgm-dma.c
+++ b/drivers/dma/lgm/lgm-dma.c
@@ -22,257 +22,7 @@
 
 #include "../dmaengine.h"
 #include "../virt-dma.h"
-
-#define DRIVER_NAME			"lgm-dma"
-
-#define DMA_ID				0x0008
-#define DMA_ID_REV			GENMASK(7, 0)
-#define DMA_ID_PNR			GENMASK(19, 16)
-#define DMA_ID_CHNR			GENMASK(26, 20)
-#define DMA_ID_DW_128B			BIT(27)
-#define DMA_ID_AW_36B			BIT(28)
-#define DMA_VER32			0x32
-#define DMA_VER31			0x31
-#define DMA_VER22			0x0A
-
-#define DMA_CTRL			0x0010
-#define DMA_CTRL_RST			BIT(0)
-#define DMA_CTRL_DSRAM_PATH		BIT(1)
-#define DMA_CTRL_DBURST_WR		BIT(3)
-#define DMA_CTRL_VLD_DF_ACK		BIT(4)
-#define DMA_CTRL_CH_FL			BIT(6)
-#define DMA_CTRL_DS_FOD			BIT(7)
-#define DMA_CTRL_DRB			BIT(8)
-#define DMA_CTRL_ENBE			BIT(9)
-#define DMA_CTRL_DESC_TMOUT_CNT_V31	GENMASK(27, 16)
-#define DMA_CTRL_DESC_TMOUT_EN_V31	BIT(30)
-#define DMA_CTRL_PKTARB			BIT(31)
-
-#define DMA_CPOLL			0x0014
-#define DMA_CPOLL_CNT			GENMASK(15, 4)
-#define DMA_CPOLL_EN			BIT(31)
-
-#define DMA_CS				0x0018
-#define DMA_CS_MASK			GENMASK(5, 0)
-
-#define DMA_CCTRL			0x001C
-#define DMA_CCTRL_ON			BIT(0)
-#define DMA_CCTRL_RST			BIT(1)
-#define DMA_CCTRL_CH_POLL_EN		BIT(2)
-#define DMA_CCTRL_CH_ABC		BIT(3) /* Adaptive Burst Chop */
-#define DMA_CDBA_MSB			GENMASK(7, 4)
-#define DMA_CCTRL_DIR_TX		BIT(8)
-#define DMA_CCTRL_CLASS			GENMASK(11, 9)
-#define DMA_CCTRL_CLASSH		GENMASK(19, 18)
-#define DMA_CCTRL_WR_NP_EN		BIT(21)
-#define DMA_CCTRL_PDEN			BIT(23)
-#define DMA_MAX_CLASS			(SZ_32 - 1)
-
-#define DMA_CDBA			0x0020
-#define DMA_CDLEN			0x0024
-#define DMA_CIS				0x0028
-#define DMA_CIE				0x002C
-#define DMA_CI_EOP			BIT(1)
-#define DMA_CI_DUR			BIT(2)
-#define DMA_CI_DESCPT			BIT(3)
-#define DMA_CI_CHOFF			BIT(4)
-#define DMA_CI_RDERR			BIT(5)
-#define DMA_CI_ALL							\
-	(DMA_CI_EOP | DMA_CI_DUR | DMA_CI_DESCPT | DMA_CI_CHOFF | DMA_CI_RDERR)
-
-#define DMA_PS				0x0040
-#define DMA_PCTRL			0x0044
-#define DMA_PCTRL_RXBL16		BIT(0)
-#define DMA_PCTRL_TXBL16		BIT(1)
-#define DMA_PCTRL_RXBL			GENMASK(3, 2)
-#define DMA_PCTRL_RXBL_8		3
-#define DMA_PCTRL_TXBL			GENMASK(5, 4)
-#define DMA_PCTRL_TXBL_8		3
-#define DMA_PCTRL_PDEN			BIT(6)
-#define DMA_PCTRL_RXBL32		BIT(7)
-#define DMA_PCTRL_RXENDI		GENMASK(9, 8)
-#define DMA_PCTRL_TXENDI		GENMASK(11, 10)
-#define DMA_PCTRL_TXBL32		BIT(15)
-#define DMA_PCTRL_MEM_FLUSH		BIT(16)
-
-#define DMA_IRNEN1			0x00E8
-#define DMA_IRNCR1			0x00EC
-#define DMA_IRNEN			0x00F4
-#define DMA_IRNCR			0x00F8
-#define DMA_C_DP_TICK			0x100
-#define DMA_C_DP_TICK_TIKNARB		GENMASK(15, 0)
-#define DMA_C_DP_TICK_TIKARB		GENMASK(31, 16)
-
-#define DMA_C_HDRM			0x110
-/*
- * If header mode is set in DMA descriptor,
- *   If bit 30 is disabled, HDR_LEN must be configured according to channel
- *     requirement.
- *   If bit 30 is enabled(checksum with header mode), HDR_LEN has no need to
- *     be configured. It will enable check sum for switch
- * If header mode is not set in DMA descriptor,
- *   This register setting doesn't matter
- */
-#define DMA_C_HDRM_HDR_SUM		BIT(30)
-
-#define DMA_C_BOFF			0x120
-#define DMA_C_BOFF_BOF_LEN		GENMASK(7, 0)
-#define DMA_C_BOFF_EN			BIT(31)
-
-#define DMA_ORRC			0x190
-#define DMA_ORRC_ORRCNT			GENMASK(8, 4)
-#define DMA_ORRC_EN			BIT(31)
-
-#define DMA_C_ENDIAN			0x200
-#define DMA_C_END_DATAENDI		GENMASK(1, 0)
-#define DMA_C_END_DE_EN			BIT(7)
-#define DMA_C_END_DESENDI		GENMASK(9, 8)
-#define DMA_C_END_DES_EN		BIT(16)
-
-/* DMA controller capability */
-#define DMA_ADDR_36BIT			BIT(0)
-#define DMA_DATA_128BIT			BIT(1)
-#define DMA_CHAN_FLOW_CTL		BIT(2)
-#define DMA_DESC_FOD			BIT(3)
-#define DMA_DESC_IN_SRAM		BIT(4)
-#define DMA_EN_BYTE_EN			BIT(5)
-#define DMA_DBURST_WR			BIT(6)
-#define DMA_VALID_DESC_FETCH_ACK	BIT(7)
-#define DMA_DFT_DRB			BIT(8)
-
-#define DMA_ORRC_MAX_CNT		16
-#define DMA_DFT_POLL_CNT		SZ_4
-#define DMA_DFT_BURST_V22		SZ_2
-#define DMA_BURSTL_8DW			SZ_8
-#define DMA_BURSTL_16DW			SZ_16
-#define DMA_BURSTL_32DW			SZ_32
-#define DMA_DFT_BURST			DMA_BURSTL_16DW
-#define DMA_MAX_DESC_NUM		(SZ_8K - 1)
-#define DMA_CHAN_BOFF_MAX		(SZ_256 - 1)
-#define DMA_DFT_ENDIAN			0
-
-#define DMA_DFT_DESC_TCNT		50
-#define DMA_HDR_LEN_MAX			(SZ_16K - 1)
-
-/* DMA flags */
-#define DMA_TX_CH			BIT(0)
-#define DMA_RX_CH			BIT(1)
-#define DEVICE_ALLOC_DESC		BIT(2)
-#define CHAN_IN_USE			BIT(3)
-#define DMA_HW_DESC			BIT(4)
-
-/* Descriptor fields */
-#define DESC_DATA_LEN			GENMASK(15, 0)
-#define DESC_BYTE_OFF			GENMASK(25, 23)
-#define DESC_EOP			BIT(28)
-#define DESC_SOP			BIT(29)
-#define DESC_C				BIT(30)
-#define DESC_OWN			BIT(31)
-
-#define DMA_CHAN_RST			1
-#define DMA_MAX_SIZE			(BIT(16) - 1)
-#define MAX_LOWER_CHANS			32
-#define MASK_LOWER_CHANS		GENMASK(4, 0)
-#define DMA_OWN				1
-#define HIGH_4_BITS			GENMASK(3, 0)
-#define DMA_DFT_DESC_NUM		1
-#define DMA_PKT_DROP_DIS		0
-
-enum ldma_chan_on_off {
-	DMA_CH_OFF = 0,
-	DMA_CH_ON = 1,
-};
-
-enum {
-	DMA_TYPE_INVD = -1, /* Legacy DMA does not have type */
-	DMA_TYPE_TX = 0,
-	DMA_TYPE_RX,
-	DMA_TYPE_MCPY,
-};
-
-enum {
-	DMA_IN_HW_MODE,
-	DMA_IN_SW_MODE,
-};
-
-struct ldma_dev;
-struct ldma_port;
-
-struct ldma_chan {
-	struct virt_dma_chan	vchan;
-	struct ldma_port	*port; /* back pointer */
-	char			name[8]; /* Channel name */
-	int			nr; /* Channel id in hardware */
-	u32			flags; /* central way or channel based way */
-	enum ldma_chan_on_off	onoff;
-	dma_addr_t		desc_phys;
-	void			*desc_base; /* Virtual address */
-	u32			desc_cnt; /* Number of descriptors */
-	int			rst;
-	u32			hdrm_len;
-	bool			hdrm_csum;
-	u32			boff_len;
-	u32			data_endian;
-	u32			desc_endian;
-	bool			pden;
-	bool			desc_rx_np;
-	bool			data_endian_en;
-	bool			desc_endian_en;
-	bool			abc_en;
-	bool			desc_init;
-	struct dma_pool		*desc_pool; /* Descriptors pool */
-	u32			desc_num;
-	struct dw2_desc_sw	*ds;
-	struct work_struct	work;
-	struct dma_slave_config config;
-	int			mode;
-};
-
-struct ldma_port {
-	struct ldma_dev		*ldev; /* back pointer */
-	u32			portid;
-	u32			rxbl;
-	u32			txbl;
-	u32			rxendi;
-	u32			txendi;
-	u32			pkt_drop;
-};
-
-struct ldma_dev {
-	struct device		*dev;
-	void __iomem		*base;
-	struct reset_control	*rst;
-	struct clk		*core_clk;
-	struct dma_device	dma_dev;
-	u32			ver;
-	int			irq;
-	struct ldma_port	*ports;
-	struct ldma_chan	*chans; /* channel list on this DMA or port */
-	spinlock_t		dev_lock; /* Controller register exclusive */
-	u32			chan_nrs;
-	u32			port_nrs;
-	u32			channels_mask;
-	u32			flags;
-	u32			pollcnt;
-	u32			orrc; /* Outstanding read count */
-	int			type;
-	const char		*name;
-	struct workqueue_struct	*wq;
-};
-
-struct dw2_desc {
-	u32 field;
-	u32 addr;
-} __packed __aligned(8);
-
-struct dw2_desc_sw {
-	struct virt_dma_desc	vdesc;
-	struct ldma_chan	*chan;
-	dma_addr_t		desc_phys;
-	size_t			desc_cnt;
-	size_t			size;
-	struct dw2_desc		*desc_hw;
-};
+#include "lgm-dma.h"
 
 static inline void
 ldma_update_bits(struct ldma_dev *d, u32 mask, u32 val, u32 ofs)
@@ -286,21 +36,6 @@ ldma_update_bits(struct ldma_dev *d, u32 mask, u32 val, u32 ofs)
 		writel(new_val, d->base + ofs);
 }
 
-static inline struct ldma_chan *to_ldma_chan(struct dma_chan *chan)
-{
-	return container_of(chan, struct ldma_chan, vchan.chan);
-}
-
-static inline struct ldma_dev *to_ldma_dev(struct dma_device *dma_dev)
-{
-	return container_of(dma_dev, struct ldma_dev, dma_dev);
-}
-
-static inline struct dw2_desc_sw *to_lgm_dma_desc(struct virt_dma_desc *vdesc)
-{
-	return container_of(vdesc, struct dw2_desc_sw, vdesc);
-}
-
 static inline bool ldma_chan_tx(struct ldma_chan *c)
 {
 	return !!(c->flags & DMA_TX_CH);
@@ -589,7 +324,7 @@ static void ldma_chan_set_class(struct ldma_chan *c, u32 val)
 			 DMA_CCTRL);
 }
 
-static int ldma_chan_on(struct ldma_chan *c)
+int ldma_chan_on(struct ldma_chan *c)
 {
 	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
 	unsigned long flags;
@@ -630,8 +365,8 @@ static int ldma_chan_off(struct ldma_chan *c)
 	return 0;
 }
 
-static void ldma_chan_desc_hw_cfg(struct ldma_chan *c, dma_addr_t desc_base,
-				  int desc_num)
+void ldma_chan_desc_hw_cfg(struct ldma_chan *c, dma_addr_t desc_base,
+			   int desc_num)
 {
 	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
 	unsigned long flags;
@@ -653,43 +388,7 @@ static void ldma_chan_desc_hw_cfg(struct ldma_chan *c, dma_addr_t desc_base,
 	c->desc_init = true;
 }
 
-static struct dma_async_tx_descriptor *
-ldma_chan_desc_cfg(struct dma_chan *chan, dma_addr_t desc_base, int desc_num)
-{
-	struct ldma_chan *c = to_ldma_chan(chan);
-	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
-	struct dma_async_tx_descriptor *tx;
-	struct dw2_desc_sw *ds;
-
-	if (!desc_num) {
-		dev_err(d->dev, "Channel %d must allocate descriptor first\n",
-			c->nr);
-		return NULL;
-	}
-
-	if (desc_num > DMA_MAX_DESC_NUM) {
-		dev_err(d->dev, "Channel %d descriptor number out of range %d\n",
-			c->nr, desc_num);
-		return NULL;
-	}
-
-	ldma_chan_desc_hw_cfg(c, desc_base, desc_num);
-
-	c->flags |= DMA_HW_DESC;
-	c->desc_cnt = desc_num;
-	c->desc_phys = desc_base;
-
-	ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
-	if (!ds)
-		return NULL;
-
-	tx = &ds->vdesc.tx;
-	dma_async_tx_descriptor_init(tx, chan);
-
-	return tx;
-}
-
-static int ldma_chan_reset(struct ldma_chan *c)
+int ldma_chan_reset(struct ldma_chan *c)
 {
 	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
 	unsigned long flags;
@@ -915,8 +614,6 @@ static void ldma_dev_init(struct ldma_dev *d)
 static int ldma_parse_dt(struct ldma_dev *d)
 {
 	struct fwnode_handle *fwnode = dev_fwnode(d->dev);
-	struct ldma_port *p;
-	int i;
 
 	if (fwnode_property_read_bool(fwnode, "intel,dma-byte-en"))
 		d->flags |= DMA_EN_BYTE_EN;
@@ -964,112 +661,10 @@ static int ldma_parse_dt(struct ldma_dev *d)
 		return -EINVAL;
 	}
 
-	if (d->ver > DMA_VER22) {
-		for (i = 0; i < d->port_nrs; i++) {
-			p = &d->ports[i];
-			p->rxendi = DMA_DFT_ENDIAN;
-			p->txendi = DMA_DFT_ENDIAN;
-			p->rxbl = DMA_DFT_BURST;
-			p->txbl = DMA_DFT_BURST;
-			p->pkt_drop = DMA_PKT_DROP_DIS;
-		}
-	}
-
 	return 0;
 }
 
-static void dma_free_desc_resource(struct virt_dma_desc *vdesc)
-{
-	struct dw2_desc_sw *ds = to_lgm_dma_desc(vdesc);
-	struct ldma_chan *c = ds->chan;
-
-	dma_pool_free(c->desc_pool, ds->desc_hw, ds->desc_phys);
-	kfree(ds);
-}
-
-static struct dw2_desc_sw *
-dma_alloc_desc_resource(int num, struct ldma_chan *c)
-{
-	struct device *dev = c->vchan.chan.device->dev;
-	struct dw2_desc_sw *ds;
-
-	if (num > c->desc_num) {
-		dev_err(dev, "sg num %d exceed max %d\n", num, c->desc_num);
-		return NULL;
-	}
-
-	ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
-	if (!ds)
-		return NULL;
-
-	ds->chan = c;
-	ds->desc_hw = dma_pool_zalloc(c->desc_pool, GFP_ATOMIC,
-				      &ds->desc_phys);
-	if (!ds->desc_hw) {
-		dev_dbg(dev, "out of memory for link descriptor\n");
-		kfree(ds);
-		return NULL;
-	}
-	ds->desc_cnt = num;
-
-	return ds;
-}
-
-static void ldma_chan_irq_en(struct ldma_chan *c)
-{
-	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
-	unsigned long flags;
-
-	spin_lock_irqsave(&d->dev_lock, flags);
-	writel(c->nr, d->base + DMA_CS);
-	writel(DMA_CI_EOP, d->base + DMA_CIE);
-	writel(BIT(c->nr), d->base + DMA_IRNEN);
-	spin_unlock_irqrestore(&d->dev_lock, flags);
-}
-
-static void ldma_issue_pending(struct dma_chan *chan)
-{
-	struct ldma_chan *c = to_ldma_chan(chan);
-	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
-	unsigned long flags;
-
-	if (d->ver == DMA_VER22) {
-		spin_lock_irqsave(&c->vchan.lock, flags);
-		if (vchan_issue_pending(&c->vchan)) {
-			struct virt_dma_desc *vdesc;
-
-			/* Get the next descriptor */
-			vdesc = vchan_next_desc(&c->vchan);
-			if (!vdesc) {
-				c->ds = NULL;
-				spin_unlock_irqrestore(&c->vchan.lock, flags);
-				return;
-			}
-			list_del(&vdesc->node);
-			c->ds = to_lgm_dma_desc(vdesc);
-			ldma_chan_desc_hw_cfg(c, c->ds->desc_phys, c->ds->desc_cnt);
-			ldma_chan_irq_en(c);
-		}
-		spin_unlock_irqrestore(&c->vchan.lock, flags);
-	}
-	ldma_chan_on(c);
-}
-
-static void ldma_synchronize(struct dma_chan *chan)
-{
-	struct ldma_chan *c = to_ldma_chan(chan);
-
-	/*
-	 * clear any pending work if any. In that
-	 * case the resource needs to be free here.
-	 */
-	cancel_work_sync(&c->work);
-	vchan_synchronize(&c->vchan);
-	if (c->ds)
-		dma_free_desc_resource(&c->ds->vdesc);
-}
-
-static int ldma_terminate_all(struct dma_chan *chan)
+int ldma_terminate_all(struct dma_chan *chan)
 {
 	struct ldma_chan *c = to_ldma_chan(chan);
 	unsigned long flags;
@@ -1083,7 +678,7 @@ static int ldma_terminate_all(struct dma_chan *chan)
 	return ldma_chan_reset(c);
 }
 
-static int ldma_resume_chan(struct dma_chan *chan)
+int ldma_resume_chan(struct dma_chan *chan)
 {
 	struct ldma_chan *c = to_ldma_chan(chan);
 
@@ -1092,282 +687,31 @@ static int ldma_resume_chan(struct dma_chan *chan)
 	return 0;
 }
 
-static int ldma_pause_chan(struct dma_chan *chan)
+int ldma_pause_chan(struct dma_chan *chan)
 {
 	struct ldma_chan *c = to_ldma_chan(chan);
 
 	return ldma_chan_off(c);
 }
 
-static enum dma_status
-ldma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
-	       struct dma_tx_state *txstate)
+static u32
+chan_burst_len(struct ldma_chan *c, struct ldma_port *p, u32 burst)
 {
-	struct ldma_chan *c = to_ldma_chan(chan);
-	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
-	enum dma_status status = DMA_COMPLETE;
+	struct ldma_dev *d = p->ldev;
 
 	if (d->ver == DMA_VER22)
-		status = dma_cookie_status(chan, cookie, txstate);
-
-	return status;
-}
-
-static void dma_chan_irq(int irq, void *data)
-{
-	struct ldma_chan *c = data;
-	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
-	u32 stat;
-
-	/* Disable channel interrupts  */
-	writel(c->nr, d->base + DMA_CS);
-	stat = readl(d->base + DMA_CIS);
-	if (!stat)
-		return;
-
-	writel(readl(d->base + DMA_CIE) & ~DMA_CI_ALL, d->base + DMA_CIE);
-	writel(stat, d->base + DMA_CIS);
-	queue_work(d->wq, &c->work);
-}
-
-static irqreturn_t dma_interrupt(int irq, void *dev_id)
-{
-	struct ldma_dev *d = dev_id;
-	struct ldma_chan *c;
-	unsigned long irncr;
-	u32 cid;
-
-	irncr = readl(d->base + DMA_IRNCR);
-	if (!irncr) {
-		dev_err(d->dev, "dummy interrupt\n");
-		return IRQ_NONE;
-	}
-
-	for_each_set_bit(cid, &irncr, d->chan_nrs) {
-		/* Mask */
-		writel(readl(d->base + DMA_IRNEN) & ~BIT(cid), d->base + DMA_IRNEN);
-		/* Ack */
-		writel(readl(d->base + DMA_IRNCR) | BIT(cid), d->base + DMA_IRNCR);
-
-		c = &d->chans[cid];
-		dma_chan_irq(irq, c);
-	}
-
-	return IRQ_HANDLED;
-}
-
-static void prep_slave_burst_len(struct ldma_chan *c)
-{
-	struct ldma_port *p = c->port;
-	struct dma_slave_config *cfg = &c->config;
-
-	if (cfg->dst_maxburst)
-		cfg->src_maxburst = cfg->dst_maxburst;
-
-	/* TX and RX has the same burst length */
-	p->txbl = ilog2(cfg->src_maxburst);
-	p->rxbl = p->txbl;
-}
-
-static struct dma_async_tx_descriptor *
-ldma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
-		   unsigned int sglen, enum dma_transfer_direction dir,
-		   unsigned long flags, void *context)
-{
-	struct ldma_chan *c = to_ldma_chan(chan);
-	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
-	size_t len, avail, total = 0;
-	struct dw2_desc *hw_ds;
-	struct dw2_desc_sw *ds;
-	struct scatterlist *sg;
-	int num = sglen, i;
-	dma_addr_t addr;
-
-	if (!sgl)
-		return NULL;
-
-	if (d->ver > DMA_VER22)
-		return ldma_chan_desc_cfg(chan, sgl->dma_address, sglen);
-
-	for_each_sg(sgl, sg, sglen, i) {
-		avail = sg_dma_len(sg);
-		if (avail > DMA_MAX_SIZE)
-			num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
-	}
-
-	ds = dma_alloc_desc_resource(num, c);
-	if (!ds)
-		return NULL;
-
-	c->ds = ds;
-
-	num = 0;
-	/* sop and eop has to be handled nicely */
-	for_each_sg(sgl, sg, sglen, i) {
-		addr = sg_dma_address(sg);
-		avail = sg_dma_len(sg);
-		total += avail;
-
-		do {
-			len = min_t(size_t, avail, DMA_MAX_SIZE);
-
-			hw_ds = &ds->desc_hw[num];
-			switch (sglen) {
-			case 1:
-				hw_ds->field &= ~DESC_SOP;
-				hw_ds->field |= FIELD_PREP(DESC_SOP, 1);
-
-				hw_ds->field &= ~DESC_EOP;
-				hw_ds->field |= FIELD_PREP(DESC_EOP, 1);
-				break;
-			default:
-				if (num == 0) {
-					hw_ds->field &= ~DESC_SOP;
-					hw_ds->field |= FIELD_PREP(DESC_SOP, 1);
-
-					hw_ds->field &= ~DESC_EOP;
-					hw_ds->field |= FIELD_PREP(DESC_EOP, 0);
-				} else if (num == (sglen - 1)) {
-					hw_ds->field &= ~DESC_SOP;
-					hw_ds->field |= FIELD_PREP(DESC_SOP, 0);
-					hw_ds->field &= ~DESC_EOP;
-					hw_ds->field |= FIELD_PREP(DESC_EOP, 1);
-				} else {
-					hw_ds->field &= ~DESC_SOP;
-					hw_ds->field |= FIELD_PREP(DESC_SOP, 0);
-
-					hw_ds->field &= ~DESC_EOP;
-					hw_ds->field |= FIELD_PREP(DESC_EOP, 0);
-				}
-				break;
-			}
-			/* Only 32 bit address supported */
-			hw_ds->addr = (u32)addr;
-
-			hw_ds->field &= ~DESC_DATA_LEN;
-			hw_ds->field |= FIELD_PREP(DESC_DATA_LEN, len);
-
-			hw_ds->field &= ~DESC_C;
-			hw_ds->field |= FIELD_PREP(DESC_C, 0);
-
-			hw_ds->field &= ~DESC_BYTE_OFF;
-			hw_ds->field |= FIELD_PREP(DESC_BYTE_OFF, addr & 0x3);
-
-			/* Ensure data ready before ownership change */
-			wmb();
-			hw_ds->field &= ~DESC_OWN;
-			hw_ds->field |= FIELD_PREP(DESC_OWN, DMA_OWN);
-
-			/* Ensure ownership changed before moving forward */
-			wmb();
-			num++;
-			addr += len;
-			avail -= len;
-		} while (avail);
-	}
-
-	ds->size = total;
-	prep_slave_burst_len(c);
-
-	return vchan_tx_prep(&c->vchan, &ds->vdesc, DMA_CTRL_ACK);
-}
-
-static int
-ldma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
-{
-	struct ldma_chan *c = to_ldma_chan(chan);
-
-	memcpy(&c->config, cfg, sizeof(c->config));
-
-	return 0;
-}
-
-static int ldma_alloc_chan_resources(struct dma_chan *chan)
-{
-	struct ldma_chan *c = to_ldma_chan(chan);
-	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
-	struct device *dev = c->vchan.chan.device->dev;
-	size_t	desc_sz;
-
-	if (d->ver > DMA_VER22) {
-		c->flags |= CHAN_IN_USE;
-		return 0;
-	}
-
-	if (c->desc_pool)
-		return c->desc_num;
-
-	desc_sz = c->desc_num * sizeof(struct dw2_desc);
-	c->desc_pool = dma_pool_create(c->name, dev, desc_sz,
-				       __alignof__(struct dw2_desc), 0);
-
-	if (!c->desc_pool) {
-		dev_err(dev, "unable to allocate descriptor pool\n");
-		return -ENOMEM;
-	}
-
-	return c->desc_num;
-}
-
-static void ldma_free_chan_resources(struct dma_chan *chan)
-{
-	struct ldma_chan *c = to_ldma_chan(chan);
-	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
-
-	if (d->ver == DMA_VER22) {
-		dma_pool_destroy(c->desc_pool);
-		c->desc_pool = NULL;
-		vchan_free_chan_resources(to_virt_chan(chan));
-		ldma_chan_reset(c);
-	} else {
-		c->flags &= ~CHAN_IN_USE;
-	}
-}
-
-static void dma_work(struct work_struct *work)
-{
-	struct ldma_chan *c = container_of(work, struct ldma_chan, work);
-	struct dma_async_tx_descriptor *tx = &c->ds->vdesc.tx;
-	struct virt_dma_chan *vc = &c->vchan;
-	struct dmaengine_desc_callback cb;
-	struct virt_dma_desc *vd, *_vd;
-	unsigned long flags;
-	LIST_HEAD(head);
-
-	spin_lock_irqsave(&c->vchan.lock, flags);
-	list_splice_tail_init(&vc->desc_completed, &head);
-	spin_unlock_irqrestore(&c->vchan.lock, flags);
-	dmaengine_desc_get_callback(tx, &cb);
-	dma_cookie_complete(tx);
-	dmaengine_desc_callback_invoke(&cb, NULL);
-
-	list_for_each_entry_safe(vd, _vd, &head, node) {
-		dmaengine_desc_get_callback(tx, &cb);
-		dma_cookie_complete(tx);
-		list_del(&vd->node);
-		dmaengine_desc_callback_invoke(&cb, NULL);
-
-		vchan_vdesc_fini(vd);
-	}
-	c->ds = NULL;
-}
-
-static void
-update_burst_len_v22(struct ldma_chan *c, struct ldma_port *p, u32 burst)
-{
-	if (ldma_chan_tx(c))
-		p->txbl = ilog2(burst);
+		return ilog2(burst);
 	else
-		p->rxbl = ilog2(burst);
+		return burst;
 }
 
 static void
-update_burst_len_v3X(struct ldma_chan *c, struct ldma_port *p, u32 burst)
+update_burst_len(struct ldma_chan *c, struct ldma_port *p, u32 burst)
 {
 	if (ldma_chan_tx(c))
-		p->txbl = burst;
+		p->txbl = chan_burst_len(c, p, burst);
 	else
-		p->rxbl = burst;
+		p->rxbl = chan_burst_len(c, p, burst);
 }
 
 static int
@@ -1387,10 +731,7 @@ update_client_configs(struct of_dma *ofdma, struct of_phandle_args *spec)
 	c = &d->chans[chan_id];
 	c->port = p;
 
-	if (d->ver == DMA_VER22)
-		update_burst_len_v22(c, p, burst);
-	else
-		update_burst_len_v3X(c, p, burst);
+	update_burst_len(c, p, burst);
 
 	ldma_port_cfg(p);
 
@@ -1417,66 +758,9 @@ static struct dma_chan *ldma_xlate(struct of_phandle_args *spec,
 	return dma_get_slave_channel(&d->chans[chan_id].vchan.chan);
 }
 
-static void ldma_dma_init_v22(int i, struct ldma_dev *d)
+static int ldma_irq_init(struct ldma_dev *d, struct platform_device *pdev)
 {
-	struct ldma_chan *c;
-
-	c = &d->chans[i];
-	c->nr = i; /* Real channel number */
-	c->rst = DMA_CHAN_RST;
-	c->desc_num = DMA_DFT_DESC_NUM;
-	snprintf(c->name, sizeof(c->name), "chan%d", c->nr);
-	INIT_WORK(&c->work, dma_work);
-	c->vchan.desc_free = dma_free_desc_resource;
-	vchan_init(&c->vchan, &d->dma_dev);
-}
-
-static void ldma_dma_init_v3X(int i, struct ldma_dev *d)
-{
-	struct ldma_chan *c;
-
-	c = &d->chans[i];
-	c->data_endian = DMA_DFT_ENDIAN;
-	c->desc_endian = DMA_DFT_ENDIAN;
-	c->data_endian_en = false;
-	c->desc_endian_en = false;
-	c->desc_rx_np = false;
-	c->flags |= DEVICE_ALLOC_DESC;
-	c->onoff = DMA_CH_OFF;
-	c->rst = DMA_CHAN_RST;
-	c->abc_en = true;
-	c->hdrm_csum = false;
-	c->boff_len = 0;
-	c->nr = i;
-	c->vchan.desc_free = dma_free_desc_resource;
-	vchan_init(&c->vchan, &d->dma_dev);
-}
-
-static int ldma_init_v22(struct ldma_dev *d, struct platform_device *pdev)
-{
-	int ret;
-
-	ret = device_property_read_u32(d->dev, "dma-channels", &d->chan_nrs);
-	if (ret < 0) {
-		dev_err(d->dev, "unable to read dma-channels property\n");
-		return ret;
-	}
-
-	d->irq = platform_get_irq(pdev, 0);
-	if (d->irq < 0)
-		return d->irq;
-
-	ret = devm_request_irq(&pdev->dev, d->irq, dma_interrupt, 0,
-			       DRIVER_NAME, d);
-	if (ret)
-		return ret;
-
-	d->wq = alloc_ordered_workqueue("dma_wq", WQ_MEM_RECLAIM |
-			WQ_HIGHPRI);
-	if (!d->wq)
-		return -ENOMEM;
-
-	return 0;
+	return d->ops->dma_irq_init(d, pdev);
 }
 
 static void ldma_clk_disable(void *data)
@@ -1487,14 +771,24 @@ static void ldma_clk_disable(void *data)
 	reset_control_assert(d->rst);
 }
 
-static int intel_ldma_port_channel_init(struct ldma_dev *d)
+static const struct of_device_id intel_ldma_match[] = {
+	{ .compatible = "intel,lgm-cdma", .data = &cdma_ops },
+	{ .compatible = "intel,lgm-hdma", .data = &hdma_ops },
+	{}
+};
+
+/* Initialize DMA controller, port, channel structures */
+static int ldma_init(struct ldma_dev *d)
 {
 	struct ldma_chan *c;
 	struct ldma_port *p;
 	unsigned long ch_mask;
-	int i,j;
+	int i, ret;
+
+	ret = d->ops->dma_ctrl_init(d);
+	if (ret)
+		return ret;
 
-	/* Port Initializations */
 	d->ports = devm_kcalloc(d->dev, d->port_nrs, sizeof(*p), GFP_KERNEL);
 	if (!d->ports)
 		return -ENOMEM;
@@ -1507,31 +801,23 @@ static int intel_ldma_port_channel_init(struct ldma_dev *d)
 	for (i = 0; i < d->port_nrs; i++) {
 		p = &d->ports[i];
 		p->portid = i;
-		p->ldev = d;
-
-		p->rxendi = DMA_DFT_ENDIAN;
-		p->txendi = DMA_DFT_ENDIAN;
-		p->rxbl = DMA_DFT_BURST;
-		p->txbl = DMA_DFT_BURST;
-		p->pkt_drop = DMA_PKT_DROP_DIS;
+		ret = d->ops->dma_port_init(d, p);
+		if (ret)
+			return ret;
 	}
 
 	ch_mask = (unsigned long)d->channels_mask;
-	for_each_set_bit(j, &ch_mask, d->chan_nrs) {
-		if (d->ver == DMA_VER22)
-			ldma_dma_init_v22(j, d);
-		else
-			ldma_dma_init_v3X(j, d);
+	for_each_set_bit(i, &ch_mask, d->chan_nrs) {
+		c = &d->chans[i];
+		c->nr = i;
+		ret = d->ops->dma_chan_init(d, c);
+		if (ret)
+			return ret;
 	}
 
 	return 0;
 }
 
-static const struct of_device_id intel_ldma_match[] = {
-	{ .compatible = "intel,lgm-ldma" },
-	{}
-};
-
 static int intel_ldma_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -1547,6 +833,12 @@ static int intel_ldma_probe(struct platform_device *pdev)
 	/* Link controller to platform device */
 	d->dev = &pdev->dev;
 
+	d->ops = device_get_match_data(dev);
+	if (!d->ops) {
+		dev_err(dev, "No device match found!\n");
+		return -ENODEV;
+	}
+
 	d->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(d->base))
 		return PTR_ERR(d->base);
@@ -1593,11 +885,13 @@ static int intel_ldma_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
-	if (d->ver == DMA_VER22) {
-		ret = ldma_init_v22(d, pdev);
-		if (ret)
-			return ret;
-	}
+	ret = ldma_init(d);
+	if (ret)
+		return ret;
+
+	ret = ldma_irq_init(d, pdev);
+	if (ret)
+		return ret;
 
 	dma_dev = &d->dma_dev;
 	dma_dev->dev = &pdev->dev;
@@ -1608,29 +902,8 @@ static int intel_ldma_probe(struct platform_device *pdev)
 	/* Channel initializations */
 	INIT_LIST_HEAD(&dma_dev->channels);
 
-	ret = intel_ldma_port_channel_init(d);
-	if (ret)
-		return ret;
-
-	dma_dev->device_alloc_chan_resources = ldma_alloc_chan_resources;
-	dma_dev->device_free_chan_resources = ldma_free_chan_resources;
-	dma_dev->device_terminate_all = ldma_terminate_all;
-	dma_dev->device_issue_pending = ldma_issue_pending;
-	dma_dev->device_tx_status = ldma_tx_status;
-	dma_dev->device_resume = ldma_resume_chan;
-	dma_dev->device_pause = ldma_pause_chan;
-	dma_dev->device_prep_slave_sg = ldma_prep_slave_sg;
-
-	if (d->ver == DMA_VER22) {
-		dma_dev->device_config = ldma_slave_config;
-		dma_dev->device_synchronize = ldma_synchronize;
-		dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
-		dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
-		dma_dev->directions = BIT(DMA_MEM_TO_DEV) |
-				      BIT(DMA_DEV_TO_MEM);
-		dma_dev->residue_granularity =
-					DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
-	}
+	/* init dma callback functions */
+	d->ops->dma_func_init(dma_dev);
 
 	platform_set_drvdata(pdev, d);
 
diff --git a/drivers/dma/lgm/lgm-dma.h b/drivers/dma/lgm/lgm-dma.h
new file mode 100644
index 000000000000..ff5aa5142019
--- /dev/null
+++ b/drivers/dma/lgm/lgm-dma.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions related to LGM DMA.
+ *
+ * Copyright (c) 2025 Maxlinear Inc.
+ */
+
+#ifndef _LGM_DMA_H
+#define _LGM_DMA_H
+
+enum ldma_chan_on_off {
+	DMA_CH_OFF = 0,
+	DMA_CH_ON = 1,
+};
+
+enum {
+	DMA_TYPE_INVD = -1, /* Legacy DMA does not have a type */
+	DMA_TYPE_TX = 0,
+	DMA_TYPE_RX,
+	DMA_TYPE_MCPY,
+};
+
+enum {
+	DMA_IN_HW_MODE,
+	DMA_IN_SW_MODE,
+};
+
+struct ldma_dev;
+struct ldma_port;
+struct ldma_chan;
+
+struct ldma_ops {
+	/* DMA control level init */
+	int (*dma_ctrl_init)(struct ldma_dev *d);
+	/* DMA port level init */
+	int (*dma_port_init)(struct ldma_dev *d, struct ldma_port *p);
+	/* DMA channel level init */
+	int (*dma_chan_init)(struct ldma_dev *d, struct ldma_chan *c);
+	/* DMA interrupt init */
+	int (*dma_irq_init)(struct ldma_dev *d, struct platform_device *pdev);
+	/* DMA callback API init */
+	void (*dma_func_init)(struct dma_device *dma_dev);
+};
+
+struct ldma_chan {
+	struct virt_dma_chan	vchan;
+	struct ldma_port	*port; /* back pointer */
+	char			name[8]; /* Channel name */
+	int			nr; /* Channel id in hardware */
+	u32			flags; /* central way or channel based way */
+	enum ldma_chan_on_off	onoff;
+	dma_addr_t		desc_phys;
+	void			*desc_base; /* Virtual address */
+	u32			desc_cnt; /* Number of descriptors */
+	int			rst;
+	u32			hdrm_len;
+	bool			hdrm_csum;
+	u32			boff_len;
+	u32			data_endian;
+	u32			desc_endian;
+	bool			pden;
+	bool			desc_rx_np;
+	bool			data_endian_en;
+	bool			desc_endian_en;
+	bool			abc_en;
+	bool			desc_init;
+	void			*priv;
+};
+
+struct ldma_port {
+	struct ldma_dev		*ldev; /* back pointer */
+	u32			portid;
+	u32			rxbl;
+	u32			txbl;
+	u32			rxendi;
+	u32			txendi;
+	u32			pkt_drop;
+};
+
+struct ldma_dev {
+	struct device		*dev;
+	void __iomem		*base;
+	struct reset_control	*rst;
+	struct clk		*core_clk;
+	struct dma_device	dma_dev;
+	u32			ver;
+	int			irq;
+	struct ldma_port	*ports;
+	struct ldma_chan	*chans; /* channel list on this DMA or port */
+	spinlock_t		dev_lock; /* Controller register exclusive */
+	u32			chan_nrs;
+	u32			port_nrs;
+	u32			channels_mask;
+	u32			flags;
+	u32			pollcnt;
+	u32			orrc; /* Outstanding read count */
+	int			type;
+	const char		*name;
+	const struct ldma_ops	*ops;
+};
+
+extern struct ldma_ops cdma_ops;
+extern struct ldma_ops hdma_ops;
+
+#define DRIVER_NAME			"lgm-dma"
+
+#define DMA_ID				0x0008
+#define DMA_ID_REV			GENMASK(7, 0)
+#define DMA_ID_PNR			GENMASK(19, 16)
+#define DMA_ID_CHNR			GENMASK(26, 20)
+#define DMA_ID_DW_128B			BIT(27)
+#define DMA_ID_AW_36B			BIT(28)
+#define DMA_VER32			0x32
+#define DMA_VER31			0x31
+#define DMA_VER22			0x0A
+
+#define DMA_CTRL			0x0010
+#define DMA_CTRL_RST			BIT(0)
+#define DMA_CTRL_DSRAM_PATH		BIT(1)
+#define DMA_CTRL_DBURST_WR		BIT(3)
+#define DMA_CTRL_VLD_DF_ACK		BIT(4)
+#define DMA_CTRL_CH_FL			BIT(6)
+#define DMA_CTRL_DS_FOD			BIT(7)
+#define DMA_CTRL_DRB			BIT(8)
+#define DMA_CTRL_ENBE			BIT(9)
+#define DMA_CTRL_DESC_TMOUT_CNT_V31	GENMASK(27, 16)
+#define DMA_CTRL_DESC_TMOUT_EN_V31	BIT(30)
+#define DMA_CTRL_PKTARB			BIT(31)
+
+#define DMA_CPOLL			0x0014
+#define DMA_CPOLL_CNT			GENMASK(15, 4)
+#define DMA_CPOLL_EN			BIT(31)
+
+#define DMA_CS				0x0018
+#define DMA_CS_MASK			GENMASK(5, 0)
+
+#define DMA_CCTRL			0x001C
+#define DMA_CCTRL_ON			BIT(0)
+#define DMA_CCTRL_RST			BIT(1)
+#define DMA_CCTRL_CH_POLL_EN		BIT(2)
+#define DMA_CCTRL_CH_ABC		BIT(3) /* Adaptive Burst Chop */
+#define DMA_CDBA_MSB			GENMASK(7, 4)
+#define DMA_CCTRL_DIR_TX		BIT(8)
+#define DMA_CCTRL_CLASS			GENMASK(11, 9)
+#define DMA_CCTRL_CLASSH		GENMASK(19, 18)
+#define DMA_CCTRL_WR_NP_EN		BIT(21)
+#define DMA_CCTRL_PDEN			BIT(23)
+#define DMA_MAX_CLASS			(SZ_32 - 1)
+
+#define DMA_CDBA			0x0020
+#define DMA_CDLEN			0x0024
+#define DMA_CIS				0x0028
+#define DMA_CIE				0x002C
+#define DMA_CI_EOP			BIT(1)
+#define DMA_CI_DUR			BIT(2)
+#define DMA_CI_DESCPT			BIT(3)
+#define DMA_CI_CHOFF			BIT(4)
+#define DMA_CI_RDERR			BIT(5)
+#define DMA_CI_ALL							\
+	(DMA_CI_EOP | DMA_CI_DUR | DMA_CI_DESCPT | DMA_CI_CHOFF | DMA_CI_RDERR)
+
+#define DMA_PS				0x0040
+#define DMA_PCTRL			0x0044
+#define DMA_PCTRL_RXBL16		BIT(0)
+#define DMA_PCTRL_TXBL16		BIT(1)
+#define DMA_PCTRL_RXBL			GENMASK(3, 2)
+#define DMA_PCTRL_RXBL_8		3
+#define DMA_PCTRL_TXBL			GENMASK(5, 4)
+#define DMA_PCTRL_TXBL_8		3
+#define DMA_PCTRL_PDEN			BIT(6)
+#define DMA_PCTRL_RXBL32		BIT(7)
+#define DMA_PCTRL_RXENDI		GENMASK(9, 8)
+#define DMA_PCTRL_TXENDI		GENMASK(11, 10)
+#define DMA_PCTRL_TXBL32		BIT(15)
+#define DMA_PCTRL_MEM_FLUSH		BIT(16)
+
+#define DMA_IRNEN1			0x00E8
+#define DMA_IRNCR1			0x00EC
+#define DMA_IRNEN			0x00F4
+#define DMA_IRNCR			0x00F8
+#define DMA_C_DP_TICK			0x100
+#define DMA_C_DP_TICK_TIKNARB		GENMASK(15, 0)
+#define DMA_C_DP_TICK_TIKARB		GENMASK(31, 16)
+
+#define DMA_C_HDRM			0x110
+/*
+ * If header mode is set in the DMA descriptor:
+ *   If bit 30 is disabled, HDR_LEN must be configured according to the
+ *     channel requirement.
+ *   If bit 30 is enabled (checksum with header mode), HDR_LEN does not
+ *     need to be configured. It will enable checksum for the switch.
+ * If header mode is not set in the DMA descriptor,
+ *   this register setting does not matter.
+ */
+#define DMA_C_HDRM_HDR_SUM		BIT(30)
+
+#define DMA_C_BOFF			0x120
+#define DMA_C_BOFF_BOF_LEN		GENMASK(7, 0)
+#define DMA_C_BOFF_EN			BIT(31)
+
+#define DMA_ORRC			0x190
+#define DMA_ORRC_ORRCNT			GENMASK(8, 4)
+#define DMA_ORRC_EN			BIT(31)
+
+#define DMA_C_ENDIAN			0x200
+#define DMA_C_END_DATAENDI		GENMASK(1, 0)
+#define DMA_C_END_DE_EN			BIT(7)
+#define DMA_C_END_DESENDI		GENMASK(9, 8)
+#define DMA_C_END_DES_EN		BIT(16)
+
+/* DMA controller capability */
+#define DMA_ADDR_36BIT			BIT(0)
+#define DMA_DATA_128BIT			BIT(1)
+#define DMA_CHAN_FLOW_CTL		BIT(2)
+#define DMA_DESC_FOD			BIT(3)
+#define DMA_DESC_IN_SRAM		BIT(4)
+#define DMA_EN_BYTE_EN			BIT(5)
+#define DMA_DBURST_WR			BIT(6)
+#define DMA_VALID_DESC_FETCH_ACK	BIT(7)
+#define DMA_DFT_DRB			BIT(8)
+
+#define DMA_ORRC_MAX_CNT		16
+#define DMA_DFT_POLL_CNT		SZ_4
+#define DMA_DFT_BURST_V22		SZ_2
+#define DMA_BURSTL_8DW			SZ_8
+#define DMA_BURSTL_16DW			SZ_16
+#define DMA_BURSTL_32DW			SZ_32
+#define DMA_DFT_BURST			DMA_BURSTL_16DW
+#define DMA_MAX_DESC_NUM		(SZ_8K - 1)
+#define DMA_CHAN_BOFF_MAX		(SZ_256 - 1)
+#define DMA_DFT_ENDIAN			0
+
+#define DMA_DFT_DESC_TCNT		50
+#define DMA_HDR_LEN_MAX			(SZ_16K - 1)
+
+/* DMA flags */
+#define DMA_TX_CH			BIT(0)
+#define DMA_RX_CH			BIT(1)
+#define DEVICE_ALLOC_DESC		BIT(2)
+#define CHAN_IN_USE			BIT(3)
+#define DMA_HW_DESC			BIT(4)
+
+/* Descriptor fields */
+#define DESC_DATA_LEN			GENMASK(15, 0)
+#define DESC_BYTE_OFF			GENMASK(25, 23)
+#define DESC_EOP			BIT(28)
+#define DESC_SOP			BIT(29)
+#define DESC_C				BIT(30)
+#define DESC_OWN			BIT(31)
+
+#define DMA_CHAN_RST			1
+#define DMA_MAX_SIZE			(BIT(16) - 1)
+#define MAX_LOWER_CHANS			32
+#define MASK_LOWER_CHANS		GENMASK(4, 0)
+#define DMA_OWN				1
+#define HIGH_4_BITS			GENMASK(3, 0)
+#define DMA_DFT_DESC_NUM		1
+#define DMA_PKT_DROP_DIS		0
+
+static inline struct ldma_chan *to_ldma_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct ldma_chan, vchan.chan);
+}
+
+static inline struct ldma_dev *to_ldma_dev(struct dma_device *dma_dev)
+{
+	return container_of(dma_dev, struct ldma_dev, dma_dev);
+}
+
+void ldma_chan_desc_hw_cfg(struct ldma_chan *c, dma_addr_t desc_base,
+			   int desc_num);
+int ldma_terminate_all(struct dma_chan *chan);
+int ldma_resume_chan(struct dma_chan *chan);
+int ldma_pause_chan(struct dma_chan *chan);
+int ldma_chan_reset(struct ldma_chan *c);
+int ldma_chan_on(struct ldma_chan *c);
+
+#endif
diff --git a/drivers/dma/lgm/lgm-hdma.c b/drivers/dma/lgm/lgm-hdma.c
new file mode 100644
index 000000000000..9133aa31c47b
--- /dev/null
+++ b/drivers/dma/lgm/lgm-hdma.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lightning Mountain centralized DMA controller driver
+ *
+ * Copyright (c) 2025 Maxlinear Inc.
+ */
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/of_dma.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+#include "lgm-dma.h"
+
+static int hdma_ctrl_init(struct ldma_dev *d);
+static int hdma_port_init(struct ldma_dev *d, struct ldma_port *p);
+static int hdma_chan_init(struct ldma_dev *d, struct ldma_chan *c);
+static int hdma_irq_init(struct ldma_dev *d, struct platform_device *pdev);
+static void hdma_func_init(struct dma_device *dma_dev);
+static void hdma_free_chan_resources(struct dma_chan *dma_chan);
+
+struct ldma_ops hdma_ops = {
+	.dma_ctrl_init = hdma_ctrl_init,
+	.dma_port_init = hdma_port_init,
+	.dma_chan_init = hdma_chan_init,
+	.dma_irq_init  = hdma_irq_init,
+	.dma_func_init = hdma_func_init,
+};
+
+static int hdma_ctrl_init(struct ldma_dev *d)
+{
+	return 0;
+}
+
+static int hdma_port_init(struct ldma_dev *d, struct ldma_port *p)
+{
+	p->ldev = d;
+	p->rxendi = DMA_DFT_ENDIAN;
+	p->txendi = DMA_DFT_ENDIAN;
+	p->rxbl = DMA_DFT_BURST;
+	p->txbl = DMA_DFT_BURST;
+	p->pkt_drop = DMA_PKT_DROP_DIS;
+
+	return 0;
+}
+
+static inline void hdma_free_desc_resource(struct virt_dma_desc *vdesc)
+{
+}
+
+static int hdma_chan_init(struct ldma_dev *d, struct ldma_chan *c)
+{
+	c->data_endian = DMA_DFT_ENDIAN;
+	c->desc_endian = DMA_DFT_ENDIAN;
+	c->data_endian_en = false;
+	c->desc_endian_en = false;
+	c->desc_rx_np = false;
+	c->flags |= DEVICE_ALLOC_DESC;
+	c->onoff = DMA_CH_OFF;
+	c->rst = DMA_CHAN_RST;
+	c->abc_en = true;
+	c->hdrm_csum = false;
+	c->boff_len = 0;
+	c->vchan.desc_free = hdma_free_desc_resource;
+	vchan_init(&c->vchan, &d->dma_dev);
+
+	return 0;
+}
+
+static int hdma_irq_init(struct ldma_dev *d, struct platform_device *pdev)
+{
+	return 0;
+}
+
+static int hdma_alloc_chan_resources(struct dma_chan *dma_chan)
+{
+	struct ldma_chan *c = to_ldma_chan(dma_chan);
+	struct device *dev = c->vchan.chan.device->dev;
+
+	dev_dbg(dev, "allocate channel resource!\n");
+
+	if (c->flags & DMA_HW_DESC) {
+		c->flags |= CHAN_IN_USE;
+		dev_dbg(dev, "desc in hw\n");
+	}
+
+	return 0;
+}
+
+static void hdma_free_chan_resources(struct dma_chan *dma_chan)
+{
+	struct ldma_chan *c = to_ldma_chan(dma_chan);
+
+	c->flags &= ~CHAN_IN_USE;
+}
+
+static void hdma_issue_pending(struct dma_chan *dma_chan)
+{
+	struct ldma_chan *c = to_ldma_chan(dma_chan);
+
+	ldma_chan_on(c);
+}
+
+static enum dma_status
+hdma_tx_status(struct dma_chan *dma_chan, dma_cookie_t cookie,
+	       struct dma_tx_state *txstate)
+{
+	return DMA_COMPLETE;
+}
+
+static void hdma_func_init(struct dma_device *dma_dev)
+{
+	dma_dev->device_alloc_chan_resources = hdma_alloc_chan_resources;
+	dma_dev->device_free_chan_resources = hdma_free_chan_resources;
+	dma_dev->device_terminate_all = ldma_terminate_all;
+	dma_dev->device_issue_pending = hdma_issue_pending;
+	dma_dev->device_tx_status = hdma_tx_status;
+	dma_dev->device_resume = ldma_resume_chan;
+	dma_dev->device_pause = ldma_pause_chan;
+}
-- 
2.43.5


^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH 4/5] dmaengine: lgm-dma: Added HDMA software mode TX function.
  2025-07-30  2:45 [PATCH 1/5] dmaengine: lgm-dma: Move platfrom data to device tree Zhu Yixin
  2025-07-30  2:45 ` [PATCH 2/5] dmaengine: lgm-dma: Correct ORRC MAX counter value Zhu Yixin
  2025-07-30  2:45 ` [PATCH 3/5] dmaengine: lgm-dma: split legacy DMA and HDMA functions Zhu Yixin
@ 2025-07-30  2:45 ` Zhu Yixin
  2025-07-30  6:21   ` Krzysztof Kozlowski
  2025-07-30 16:17   ` kernel test robot
  2025-07-30  2:45 ` [PATCH 5/5] dmaengine: lgm_dma: Added HDMA RX interrupt handle functions Zhu Yixin
  2025-07-30  6:19 ` [PATCH 1/5] dmaengine: lgm-dma: Move platfrom data to device tree Krzysztof Kozlowski
  4 siblings, 2 replies; 14+ messages in thread
From: Zhu Yixin @ 2025-07-30  2:45 UTC (permalink / raw)
  To: dmaengine, vkoul; +Cc: jchng, sureshnagaraj, Zhu Yixin

Add HDMA software mode to handle DMA TX functions.

Signed-off-by: Zhu Yixin <yzhu@maxlinear.com>
---
 .../devicetree/bindings/dma/intel,ldma.yaml   |   6 +
 drivers/dma/lgm/lgm-cdma.c                    |  42 +-
 drivers/dma/lgm/lgm-dma.c                     | 189 +++++---
 drivers/dma/lgm/lgm-dma.h                     |  31 +-
 drivers/dma/lgm/lgm-hdma.c                    | 453 +++++++++++++++++-
 5 files changed, 608 insertions(+), 113 deletions(-)

diff --git a/Documentation/devicetree/bindings/dma/intel,ldma.yaml b/Documentation/devicetree/bindings/dma/intel,ldma.yaml
index f91d849edc4c..e58f1d13aee3 100644
--- a/Documentation/devicetree/bindings/dma/intel,ldma.yaml
+++ b/Documentation/devicetree/bindings/dma/intel,ldma.yaml
@@ -118,6 +118,11 @@ properties:
     description:
       Name of the DMA.
 
+  intel,dma-hw-desc:
+    type: boolean
+    description:
+      DMA descriptor manipulated by hardware.
+
 required:
   - compatible
   - reg
@@ -157,4 +162,5 @@ examples:
       intel,dma-desc-in-sram;
       intel,dma-name = "dma3";
       intel,dma-orrc = <16>;
+      intel,dma-hw-desc;
     };
diff --git a/drivers/dma/lgm/lgm-cdma.c b/drivers/dma/lgm/lgm-cdma.c
index 0acb30706c42..07dff684167d 100644
--- a/drivers/dma/lgm/lgm-cdma.c
+++ b/drivers/dma/lgm/lgm-cdma.c
@@ -22,6 +22,8 @@
 #include "../virt-dma.h"
 #include "lgm-dma.h"
 
+#define DESC_DATA_LEN		GENMASK(15, 0)
+
 struct dw2_desc {
 	u32 field;
 	u32 addr;
@@ -44,19 +46,14 @@ struct cdma_chan {
 	struct dma_slave_config config;
 };
 
-struct cdma_dev {
-	struct ldma_dev		*ldev;
-	struct workqueue_struct	*wq;
-};
-
 static int cdma_ctrl_init(struct ldma_dev *d);
 static int cdma_port_init(struct ldma_dev *d, struct ldma_port *p);
 static int cdma_chan_init(struct ldma_dev *d, struct ldma_chan *c);
 static int cdma_irq_init(struct ldma_dev *d, struct platform_device *pdev);
-static void cdma_func_init(struct dma_device *dma_dev);
+static void cdma_func_init(struct ldma_dev *d, struct dma_device *dma_dev);
 static irqreturn_t cdma_interrupt(int irq, void *dev_id);
 
-static struct cdma_dev *g_cdma_dev;
+static struct workqueue_struct	*wq_work;
 
 struct ldma_ops cdma_ops = {
 	.dma_ctrl_init = cdma_ctrl_init,
@@ -119,20 +116,10 @@ static void cdma_work(struct work_struct *work)
 
 static int cdma_ctrl_init(struct ldma_dev *d)
 {
-	struct cdma_dev *cdma;
-
-	cdma = devm_kzalloc(d->dev, sizeof(*cdma), GFP_KERNEL);
-	if (!cdma)
+	wq_work = alloc_ordered_workqueue("dma_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI);
+	if (!wq_work)
 		return -ENOMEM;
 
-	cdma->ldev = d;
-	cdma->wq = alloc_ordered_workqueue("dma_wq",
-					   WQ_MEM_RECLAIM | WQ_HIGHPRI);
-	if (!cdma->wq)
-		return -ENOMEM;
-
-	g_cdma_dev = cdma;
-
 	return 0;
 }
 
@@ -174,7 +161,7 @@ static int cdma_irq_init(struct ldma_dev *d, struct platform_device *pdev)
 static void cdma_chan_irq(int irq, void *data)
 {
 	struct ldma_chan *c = data;
-	struct ldma_dev *d = g_cdma_dev->ldev;
+	struct ldma_dev *d = chan_to_ldma_dev(c);
 	struct cdma_chan *chan;
 	u32 stat;
 
@@ -187,7 +174,7 @@ static void cdma_chan_irq(int irq, void *data)
 	writel(readl(d->base + DMA_CIE) & ~DMA_CI_ALL, d->base + DMA_CIE);
 	writel(stat, d->base + DMA_CIS);
 	chan = (struct cdma_chan *)c->priv;
-	queue_work(g_cdma_dev->wq, &chan->work);
+	queue_work(wq_work, &chan->work);
 }
 
 static irqreturn_t cdma_interrupt(int irq, void *dev_id)
@@ -219,7 +206,7 @@ static irqreturn_t cdma_interrupt(int irq, void *dev_id)
 static int cdma_alloc_chan_resources(struct dma_chan *dma_chan)
 {
 	struct ldma_chan *c = to_ldma_chan(dma_chan);
-	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	struct ldma_dev *d = chan_to_ldma_dev(c);
 	struct cdma_chan *chan = (struct cdma_chan *)c->priv;
 	struct device *dev = d->dev;
 	size_t desc_sz;
@@ -279,7 +266,7 @@ cdma_slave_config(struct dma_chan *dma_chan, struct dma_slave_config *cfg)
 
 static void cdma_chan_irq_en(struct ldma_chan *c)
 {
-	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	struct ldma_dev *d = chan_to_ldma_dev(c);
 	unsigned long flags;
 
 	spin_lock_irqsave(&d->dev_lock, flags);
@@ -293,7 +280,6 @@ static void cdma_issue_pending(struct dma_chan *dma_chan)
 {
 	struct ldma_chan *c = to_ldma_chan(dma_chan);
 	struct cdma_chan *chan = (struct cdma_chan *)c->priv;
-	//struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
 	unsigned long flags;
 
 	spin_lock_irqsave(&c->vchan.lock, flags);
@@ -332,12 +318,12 @@ cdma_tx_status(struct dma_chan *dma_chan, dma_cookie_t cookie,
 static struct dw2_desc_sw *
 cdma_alloc_desc_resource(int num, struct ldma_chan *c)
 {
-	struct device *dev = g_cdma_dev->ldev->dev;
+	struct ldma_dev *d = chan_to_ldma_dev(c);
 	struct cdma_chan *chan = (struct cdma_chan *)c->priv;
 	struct dw2_desc_sw *ds;
 
 	if (num > c->desc_cnt) {
-		dev_err(dev, "sg num %d exceed max %d\n", num, c->desc_cnt);
+		dev_err(d->dev, "sg num %d exceed max %d\n", num, c->desc_cnt);
 		return NULL;
 	}
 
@@ -349,7 +335,7 @@ cdma_alloc_desc_resource(int num, struct ldma_chan *c)
 	ds->desc_hw = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC,
 				      &ds->desc_phys);
 	if (!ds->desc_hw) {
-		dev_dbg(dev, "out of memory for link descriptor\n");
+		dev_dbg(d->dev, "out of memory for link descriptor\n");
 		kfree(ds);
 		return NULL;
 	}
@@ -472,7 +458,7 @@ cdma_prep_slave_sg(struct dma_chan *dma_chan, struct scatterlist *sgl,
 	return vchan_tx_prep(&c->vchan, &ds->vdesc, DMA_CTRL_ACK);
 }
 
-static void cdma_func_init(struct dma_device *dma_dev)
+static void cdma_func_init(struct ldma_dev *d, struct dma_device *dma_dev)
 {
 	dma_dev->device_alloc_chan_resources = cdma_alloc_chan_resources;
 	dma_dev->device_free_chan_resources = cdma_free_chan_resources;
diff --git a/drivers/dma/lgm/lgm-dma.c b/drivers/dma/lgm/lgm-dma.c
index 6fade7b4f820..edf26ecc29b0 100644
--- a/drivers/dma/lgm/lgm-dma.c
+++ b/drivers/dma/lgm/lgm-dma.c
@@ -24,28 +24,18 @@
 #include "../virt-dma.h"
 #include "lgm-dma.h"
 
-static inline void
-ldma_update_bits(struct ldma_dev *d, u32 mask, u32 val, u32 ofs)
-{
-	u32 old_val, new_val;
-
-	old_val = readl(d->base +  ofs);
-	new_val = (old_val & ~mask) | (val & mask);
-
-	if (new_val != old_val)
-		writel(new_val, d->base + ofs);
-}
+enum {
+	DMA_ARG_CHAN_ID,
+	DMA_ARG_DESC_CNT,
+	DMA_ARG_PORT_ID,
+	DMA_ARG_BURST_SZ,
+};
 
 static inline bool ldma_chan_tx(struct ldma_chan *c)
 {
 	return !!(c->flags & DMA_TX_CH);
 }
 
-static inline bool ldma_chan_is_hw_desc(struct ldma_chan *c)
-{
-	return !!(c->flags & DMA_HW_DESC);
-}
-
 static void ldma_dev_reset(struct ldma_dev *d)
 
 {
@@ -251,7 +241,7 @@ static int ldma_dev_cfg(struct ldma_dev *d)
 
 static int ldma_chan_cctrl_cfg(struct ldma_chan *c, u32 val)
 {
-	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	struct ldma_dev *d = chan_to_ldma_dev(c);
 	u32 class_low, class_high;
 	unsigned long flags;
 	u32 reg;
@@ -280,7 +270,7 @@ static int ldma_chan_cctrl_cfg(struct ldma_chan *c, u32 val)
 
 static void ldma_chan_irq_init(struct ldma_chan *c)
 {
-	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	struct ldma_dev *d = chan_to_ldma_dev(c);
 	unsigned long flags;
 	u32 enofs, crofs;
 	u32 cn_bit;
@@ -306,9 +296,36 @@ static void ldma_chan_irq_init(struct ldma_chan *c)
 	spin_unlock_irqrestore(&d->dev_lock, flags);
 }
 
+static void ldma_chan_irq_en(struct ldma_chan *c, bool en)
+{
+	struct ldma_dev *d = chan_to_ldma_dev(c);
+	u32 enofs, crofs;
+	u32 cn_bit, val;
+
+	if (ldma_chan_is_hw_desc(c))
+		return;
+
+	if (c->nr < MAX_LOWER_CHANS) {
+		enofs = DMA_IRNEN;
+		crofs = DMA_IRNCR;
+		cn_bit = BIT(c->nr);
+	} else {
+		enofs = DMA_IRNEN1;
+		crofs = DMA_IRNCR1;
+		cn_bit = BIT(c->nr - MAX_LOWER_CHANS);
+	}
+
+	writel(cn_bit, d->base + crofs);
+	val = en ? cn_bit : 0;
+	ldma_update_bits(d, cn_bit, val, enofs);
+	val = en ? DMA_CI_EOP : 0;
+	ldma_update_bits(d, DMA_CI_EOP, val, DMA_CIE);
+	dev_dbg(d->dev, "irq: %d, CIE: 0x%x\n", en, readl(d->base + DMA_CIE));
+}
+
 static void ldma_chan_set_class(struct ldma_chan *c, u32 val)
 {
-	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	struct ldma_dev *d = chan_to_ldma_dev(c);
 	u32 class_val;
 
 	if (d->type == DMA_TYPE_MCPY || val > DMA_MAX_CLASS)
@@ -326,9 +343,12 @@ static void ldma_chan_set_class(struct ldma_chan *c, u32 val)
 
 int ldma_chan_on(struct ldma_chan *c)
 {
-	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	struct ldma_dev *d = chan_to_ldma_dev(c);
 	unsigned long flags;
 
+	if (c->onoff == DMA_CH_ON)
+		return 0;
+
 	/* If descriptors not configured, not allow to turn on channel */
 	if (WARN_ON(!c->desc_init))
 		return -EINVAL;
@@ -336,6 +356,7 @@ int ldma_chan_on(struct ldma_chan *c)
 	spin_lock_irqsave(&d->dev_lock, flags);
 	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
 	ldma_update_bits(d, DMA_CCTRL_ON, DMA_CCTRL_ON, DMA_CCTRL);
+	ldma_chan_irq_en(c, true);
 	spin_unlock_irqrestore(&d->dev_lock, flags);
 
 	c->onoff = DMA_CH_ON;
@@ -345,7 +366,7 @@ int ldma_chan_on(struct ldma_chan *c)
 
 static int ldma_chan_off(struct ldma_chan *c)
 {
-	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	struct ldma_dev *d = chan_to_ldma_dev(c);
 	unsigned long flags;
 	u32 val;
 	int ret;
@@ -353,6 +374,7 @@ static int ldma_chan_off(struct ldma_chan *c)
 	spin_lock_irqsave(&d->dev_lock, flags);
 	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
 	ldma_update_bits(d, DMA_CCTRL_ON, 0, DMA_CCTRL);
+	ldma_chan_irq_en(c, false);
 	spin_unlock_irqrestore(&d->dev_lock, flags);
 
 	ret = readl_poll_timeout_atomic(d->base + DMA_CCTRL, val,
@@ -368,7 +390,7 @@ static int ldma_chan_off(struct ldma_chan *c)
 void ldma_chan_desc_hw_cfg(struct ldma_chan *c, dma_addr_t desc_base,
 			   int desc_num)
 {
-	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	struct ldma_dev *d = chan_to_ldma_dev(c);
 	unsigned long flags;
 
 	spin_lock_irqsave(&d->dev_lock, flags);
@@ -390,7 +412,7 @@ void ldma_chan_desc_hw_cfg(struct ldma_chan *c, dma_addr_t desc_base,
 
 int ldma_chan_reset(struct ldma_chan *c)
 {
-	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	struct ldma_dev *d = chan_to_ldma_dev(c);
 	unsigned long flags;
 	u32 val;
 	int ret;
@@ -417,7 +439,7 @@ int ldma_chan_reset(struct ldma_chan *c)
 
 static void ldma_chan_byte_offset_cfg(struct ldma_chan *c, u32 boff_len)
 {
-	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	struct ldma_dev *d = chan_to_ldma_dev(c);
 	u32 mask = DMA_C_BOFF_EN | DMA_C_BOFF_BOF_LEN;
 	u32 val;
 
@@ -433,7 +455,7 @@ static void ldma_chan_byte_offset_cfg(struct ldma_chan *c, u32 boff_len)
 static void ldma_chan_data_endian_cfg(struct ldma_chan *c, bool enable,
 				      u32 endian_type)
 {
-	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	struct ldma_dev *d = chan_to_ldma_dev(c);
 	u32 mask = DMA_C_END_DE_EN | DMA_C_END_DATAENDI;
 	u32 val;
 
@@ -449,7 +471,7 @@ static void ldma_chan_data_endian_cfg(struct ldma_chan *c, bool enable,
 static void ldma_chan_desc_endian_cfg(struct ldma_chan *c, bool enable,
 				      u32 endian_type)
 {
-	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	struct ldma_dev *d = chan_to_ldma_dev(c);
 	u32 mask = DMA_C_END_DES_EN | DMA_C_END_DESENDI;
 	u32 val;
 
@@ -464,7 +486,7 @@ static void ldma_chan_desc_endian_cfg(struct ldma_chan *c, bool enable,
 
 static void ldma_chan_hdr_mode_cfg(struct ldma_chan *c, u32 hdr_len, bool csum)
 {
-	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	struct ldma_dev *d = chan_to_ldma_dev(c);
 	u32 mask, val;
 
 	/* NB, csum disabled, hdr length must be provided */
@@ -483,7 +505,7 @@ static void ldma_chan_hdr_mode_cfg(struct ldma_chan *c, u32 hdr_len, bool csum)
 
 static void ldma_chan_rxwr_np_cfg(struct ldma_chan *c, bool enable)
 {
-	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	struct ldma_dev *d = chan_to_ldma_dev(c);
 	u32 mask, val;
 
 	/* Only valid for RX channel */
@@ -499,7 +521,7 @@ static void ldma_chan_rxwr_np_cfg(struct ldma_chan *c, bool enable)
 
 static void ldma_chan_abc_cfg(struct ldma_chan *c, bool enable)
 {
-	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	struct ldma_dev *d = chan_to_ldma_dev(c);
 	u32 mask, val;
 
 	if (d->ver < DMA_VER32 || ldma_chan_tx(c))
@@ -556,7 +578,7 @@ static int ldma_port_cfg(struct ldma_port *p)
 
 static int ldma_chan_cfg(struct ldma_chan *c)
 {
-	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	struct ldma_dev *d = chan_to_ldma_dev(c);
 	unsigned long flags;
 	u32 reg;
 
@@ -636,6 +658,9 @@ static int ldma_parse_dt(struct ldma_dev *d)
 	if (fwnode_property_read_bool(fwnode, "intel,dma-desc-fack"))
 		d->flags |= DMA_VALID_DESC_FETCH_ACK;
 
+	if (fwnode_property_read_bool(fwnode, "intel,dma-hw-desc"))
+		d->flags |= DMA_CHAN_HW_DESC;
+
 	if (fwnode_property_read_u32(fwnode, "intel,dma-poll-cnt",
 				     &d->pollcnt))
 		d->pollcnt = DMA_DFT_POLL_CNT;
@@ -664,34 +689,23 @@ static int ldma_parse_dt(struct ldma_dev *d)
 	return 0;
 }
 
-int ldma_terminate_all(struct dma_chan *chan)
+int ldma_terminate_all(struct dma_chan *dma_chan)
 {
-	struct ldma_chan *c = to_ldma_chan(chan);
-	unsigned long flags;
-	LIST_HEAD(head);
+	struct ldma_chan *c = to_ldma_chan(dma_chan);
 
-	spin_lock_irqsave(&c->vchan.lock, flags);
-	vchan_get_all_descriptors(&c->vchan, &head);
-	spin_unlock_irqrestore(&c->vchan.lock, flags);
-	vchan_dma_desc_free_list(&c->vchan, &head);
+	vchan_free_chan_resources(&c->vchan);
 
 	return ldma_chan_reset(c);
 }
 
-int ldma_resume_chan(struct dma_chan *chan)
+int ldma_resume_chan(struct dma_chan *dma_chan)
 {
-	struct ldma_chan *c = to_ldma_chan(chan);
-
-	ldma_chan_on(c);
-
-	return 0;
+	return ldma_chan_on(to_ldma_chan(dma_chan));
 }
 
-int ldma_pause_chan(struct dma_chan *chan)
+int ldma_pause_chan(struct dma_chan *dma_chan)
 {
-	struct ldma_chan *c = to_ldma_chan(chan);
-
-	return ldma_chan_off(c);
+	return ldma_chan_off(to_ldma_chan(dma_chan));
 }
 
 static u32
@@ -718,43 +732,82 @@ static int
 update_client_configs(struct of_dma *ofdma, struct of_phandle_args *spec)
 {
 	struct ldma_dev *d = ofdma->of_dma_data;
-	u32 chan_id =  spec->args[0];
-	u32 port_id =  spec->args[1];
-	u32 burst = spec->args[2];
-	struct ldma_port *p;
+	int i, chan_id, port_id, desc_cnt, burst;
 	struct ldma_chan *c;
+	struct ldma_port *p;
 
-	if (chan_id >= d->chan_nrs || port_id >= d->port_nrs)
-		return 0;
+	port_id  = -1;
+	desc_cnt = -1;
+	burst    = -1;
+
+	for (i = spec->args_count - 1; i >= 0; i--) {
+		switch (i) {
+		case DMA_ARG_BURST_SZ:
+			burst = spec->args[i];
+			break;
+
+		case DMA_ARG_PORT_ID:
+			port_id = spec->args[i];
+			break;
+
+		case DMA_ARG_DESC_CNT:
+			desc_cnt = spec->args[i];
+			break;
+
+		case DMA_ARG_CHAN_ID:
+			chan_id = spec->args[i];
+			break;
+		}
+	}
+
+	if (chan_id >= d->chan_nrs || chan_id < 0)
+		return -1;
 
-	p = &d->ports[port_id];
 	c = &d->chans[chan_id];
+	if (!ldma_chan_is_hw_desc(c) && desc_cnt > 0)
+		c->desc_cnt = desc_cnt;
+
+	if (port_id < 0 || burst < 0 || port_id > d->port_nrs)
+		return chan_id;
+
+	p = &d->ports[port_id];
 	c->port = p;
 
 	update_burst_len(c, p, burst);
 
 	ldma_port_cfg(p);
 
-	return 1;
+	return chan_id;
 }
 
+/*
+ * args 0: channel ID
+ * args 1: descriptor count
+ * args 2: port ID
+ * args 3: burst size setting
+ */
 static struct dma_chan *ldma_xlate(struct of_phandle_args *spec,
 				   struct of_dma *ofdma)
 {
 	struct ldma_dev *d = ofdma->of_dma_data;
-	u32 chan_id =  spec->args[0];
-	int ret;
+	int chan_id;
+
+	dev_dbg(d->dev, "DMA channel args count: %d\n", spec->args_count);
+	dev_dbg(d->dev, "DMA chan no: %u, desc num: %u\n",
+		spec->args[0], spec->args[1]);
 
 	if (!spec->args_count)
 		return NULL;
 
-	/* if args_count is 1 driver use default settings */
-	if (spec->args_count > 1) {
-		ret = update_client_configs(ofdma, spec);
-		if (!ret)
-			return NULL;
+	if (!(d->flags & DMA_CHAN_HW_DESC) && spec->args_count < 2) {
+		dev_err(d->dev, "DMA channel parameter format error!\n");
+		return NULL;
 	}
 
+	chan_id = update_client_configs(ofdma, spec);
+	if (chan_id < 0)
+		return NULL;
+
 	return dma_get_slave_channel(&d->chans[chan_id].vchan.chan);
 }
 
@@ -885,10 +938,6 @@ static int intel_ldma_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
-	ret = ldma_init(d);
-	if (ret)
-		return ret;
-
 	ret = ldma_irq_init(d, pdev);
 	if (ret)
 		return ret;
@@ -903,7 +952,11 @@ static int intel_ldma_probe(struct platform_device *pdev)
 	INIT_LIST_HEAD(&dma_dev->channels);
 
 	/* init dma callback functions */
-	d->ops->dma_func_init(dma_dev);
+	d->ops->dma_func_init(d, dma_dev);
+
+	ret = ldma_init(d);
+	if (ret)
+		return ret;
 
 	platform_set_drvdata(pdev, d);
 
diff --git a/drivers/dma/lgm/lgm-dma.h b/drivers/dma/lgm/lgm-dma.h
index ff5aa5142019..d6692b80e329 100644
--- a/drivers/dma/lgm/lgm-dma.h
+++ b/drivers/dma/lgm/lgm-dma.h
@@ -39,7 +39,7 @@ struct ldma_ops {
 	/* DMA interrupt init */
 	int (*dma_irq_init)(struct ldma_dev *d, struct platform_device *pdev);
 	/* DMA callback API init */
-	void (*dma_func_init)(struct dma_device *dma_dev);
+	void (*dma_func_init)(struct ldma_dev *d, struct dma_device *dma_dev);
 };
 
 struct ldma_chan {
@@ -218,6 +218,7 @@ extern struct ldma_ops hdma_ops;
 #define DMA_DBURST_WR			BIT(6)
 #define DMA_VALID_DESC_FETCH_ACK	BIT(7)
 #define DMA_DFT_DRB			BIT(8)
+#define DMA_CHAN_HW_DESC		BIT(9)
 
 #define DMA_ORRC_MAX_CNT		16
 #define DMA_DFT_POLL_CNT		SZ_4
@@ -236,12 +237,10 @@ extern struct ldma_ops hdma_ops;
 /* DMA flags */
 #define DMA_TX_CH			BIT(0)
 #define DMA_RX_CH			BIT(1)
-#define DEVICE_ALLOC_DESC		BIT(2)
-#define CHAN_IN_USE			BIT(3)
-#define DMA_HW_DESC			BIT(4)
+#define CHAN_IN_USE			BIT(2)
+#define DMA_HW_DESC			BIT(3)
 
 /* Descriptor fields */
-#define DESC_DATA_LEN			GENMASK(15, 0)
 #define DESC_BYTE_OFF			GENMASK(25, 23)
 #define DESC_EOP			BIT(28)
 #define DESC_SOP			BIT(29)
@@ -267,6 +266,28 @@ static inline struct ldma_dev *to_ldma_dev(struct dma_device *dma_dev)
 	return container_of(dma_dev, struct ldma_dev, dma_dev);
 }
 
+static inline bool ldma_chan_is_hw_desc(struct ldma_chan *c)
+{
+	return !!(c->flags & DMA_HW_DESC);
+}
+
+static inline struct ldma_dev *chan_to_ldma_dev(struct ldma_chan *c)
+{
+	return to_ldma_dev(c->vchan.chan.device);
+}
+
+static inline void
+ldma_update_bits(struct ldma_dev *d, u32 mask, u32 val, u32 ofs)
+{
+	u32 old_val, new_val;
+
+	old_val = readl(d->base +  ofs);
+	new_val = (old_val & ~mask) | (val & mask);
+
+	if (new_val != old_val)
+		writel(new_val, d->base + ofs);
+}
+
 void ldma_chan_desc_hw_cfg(struct ldma_chan *c, dma_addr_t desc_base,
 			   int desc_num);
 int ldma_terminate_all(struct dma_chan *chan);
diff --git a/drivers/dma/lgm/lgm-hdma.c b/drivers/dma/lgm/lgm-hdma.c
index 9133aa31c47b..198005b48d59 100644
--- a/drivers/dma/lgm/lgm-hdma.c
+++ b/drivers/dma/lgm/lgm-hdma.c
@@ -4,6 +4,7 @@
  *
  * Copyright (c) 2025 Maxlinear Inc.
  */
+
 #include <linux/bitfield.h>
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
@@ -22,12 +23,68 @@
 #include "../virt-dma.h"
 #include "lgm-dma.h"
 
+/* Descriptor fields */
+#define DESC_DATA_LEN		GENMASK(13, 0)
+#define DESC_BP_EXT		GENMASK(26, 23)
+#define DESC_EOP		BIT(28)
+#define DESC_SOP		BIT(29)
+#define DESC_C			BIT(30)
+#define DESC_OWN		BIT(31)
+
+struct dw4_desc_hw {
+	u32 dw0;
+	u32 dw1;
+	u32 dw2;
+	u32 dw3;
+} __packed __aligned(8);
+
+struct dw4_desc_sw {
+	struct virt_dma_desc	vd;
+	struct ldma_chan	*chan;
+	struct dw4_desc_hw	*desc_hw;
+};
+
+/*
+ * HDMA TX needs some sideband info for the switch, carried in dw0 and dw1.
+ */
+struct chan_cfg {
+	u32 desc_dw0;
+	u32 desc_dw1;
+};
+
+struct hdma_chan {
+	struct dw4_desc_sw	*ds;
+	struct chan_cfg		cfg;
+	struct ldma_chan	*c;
+	int			prep_idx; /* desc prep idx */
+	int			comp_idx; /* desc comp idx */
+	int			prep_desc_cnt;
+	struct tasklet_struct	task;
+};
+
 static int hdma_ctrl_init(struct ldma_dev *d);
 static int hdma_port_init(struct ldma_dev *d, struct ldma_port *p);
 static int hdma_chan_init(struct ldma_dev *d, struct ldma_chan *c);
 static int hdma_irq_init(struct ldma_dev *d, struct platform_device *pdev);
-static void hdma_func_init(struct dma_device *dma_dev);
+static void hdma_func_init(struct ldma_dev *d, struct dma_device *dma_dev);
 static void hdma_free_chan_resources(struct dma_chan *dma_chan);
+static void dma_tx_chan_complete(struct tasklet_struct *t);
+
+static inline
+struct dw4_desc_sw *to_lgm_dma_dw4_desc(struct virt_dma_desc *vd)
+{
+	return container_of(vd, struct dw4_desc_sw, vd);
+}
+
+static inline bool is_dma_chan_tx(struct ldma_dev *d)
+{
+	return (d->type == DMA_TYPE_TX);
+}
+
+static inline bool is_dma_chan_rx(struct ldma_dev *d)
+{
+	return (d->type == DMA_TYPE_RX);
+}
 
 struct ldma_ops hdma_ops = {
 	.dma_ctrl_init = hdma_ctrl_init,
@@ -54,18 +111,21 @@ static int hdma_port_init(struct ldma_dev *d, struct ldma_port *p)
 	return 0;
 }
 
-static inline void hdma_free_desc_resource(struct virt_dma_desc *vdesc)
+static inline void hdma_free_desc_resource(struct virt_dma_desc *vd)
 {
 }
 
 static int hdma_chan_init(struct ldma_dev *d, struct ldma_chan *c)
 {
+	struct hdma_chan *chan;
+
 	c->data_endian = DMA_DFT_ENDIAN;
 	c->desc_endian = DMA_DFT_ENDIAN;
 	c->data_endian_en = false;
 	c->desc_endian_en = false;
 	c->desc_rx_np = false;
-	c->flags |= DEVICE_ALLOC_DESC;
+	if (d->flags & DMA_CHAN_HW_DESC)
+		c->flags |= DMA_HW_DESC;
 	c->onoff = DMA_CH_OFF;
 	c->rst = DMA_CHAN_RST;
 	c->abc_en = true;
@@ -74,51 +134,404 @@ static int hdma_chan_init(struct ldma_dev *d, struct ldma_chan *c)
 	c->vchan.desc_free = hdma_free_desc_resource;
 	vchan_init(&c->vchan, &d->dma_dev);
 
+	chan = devm_kzalloc(d->dev, sizeof(*chan), GFP_KERNEL);
+	if (!chan)
+		return -ENOMEM;
+
+	c->priv = chan;
+	chan->c = c;
+	if (is_dma_chan_tx(d))
+		tasklet_setup(&chan->task, dma_tx_chan_complete);
+
 	return 0;
 }
 
+static inline void hdma_get_irq_off(int high, u32 *en_off, u32 *cr_off)
+{
+	if (!high) {
+		*cr_off = DMA_IRNCR;
+		*en_off = DMA_IRNEN;
+	} else {
+		*cr_off = DMA_IRNCR1;
+		*en_off = DMA_IRNEN1;
+	}
+}
+
+static inline
+void hdma_chan_int_enable(struct ldma_dev *d, struct ldma_chan *c)
+{
+	unsigned long flags;
+	u32 val, en_off, cr_off, cid;
+
+	spin_lock_irqsave(&d->dev_lock, flags);
+	/* select DMA channel */
+	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+	/* Enable EOP interrupt */
+	ldma_update_bits(d, DMA_CI_EOP, DMA_CI_EOP, DMA_CIE);
+
+	val = c->nr >= MAX_LOWER_CHANS ? 1 : 0;
+	cid = c->nr >= MAX_LOWER_CHANS ? c->nr - MAX_LOWER_CHANS : c->nr;
+	hdma_get_irq_off(val, &en_off, &cr_off);
+	ldma_update_bits(d, BIT(cid), BIT(cid), en_off);
+
+	spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void dma_tx_chan_complete(struct tasklet_struct *t)
+{
+	struct hdma_chan *chan = from_tasklet(chan, t, task);
+	struct ldma_chan *c = chan->c;
+	struct ldma_dev *d = chan_to_ldma_dev(c);
+
+	/* check how many valid descriptor from DMA */
+	while (chan->prep_desc_cnt > 0) {
+		struct dmaengine_desc_callback cb;
+		struct dma_async_tx_descriptor *tx;
+		struct dw4_desc_sw *desc_sw;
+		struct dw4_desc_hw *desc_hw;
+
+		desc_sw = chan->ds + chan->comp_idx;
+		desc_hw = desc_sw->desc_hw;
+		dma_map_single(d->dev, desc_hw,
+			       sizeof(*desc_hw), DMA_FROM_DEVICE);
+
+		/* desc still in processing, stop */
+		if (!FIELD_GET(DESC_C, desc_hw->dw3))
+			break;
+
+		tx = &desc_sw->vd.tx;
+		dmaengine_desc_get_callback(tx, &cb);
+
+		dma_cookie_complete(tx);
+		chan->comp_idx = (chan->comp_idx + 1) % c->desc_cnt;
+		chan->prep_desc_cnt -= 1;
+		dmaengine_desc_callback_invoke(&cb, NULL);
+	}
+
+	hdma_chan_int_enable(d, c);
+}
+
+static unsigned long hdma_irq_stat(struct ldma_dev *d, int high)
+{
+	u32 irnen, irncr, en_off, cr_off, cid;
+	unsigned long flags;
+	unsigned long ret;
+
+	spin_lock_irqsave(&d->dev_lock, flags);
+
+	hdma_get_irq_off(high, &en_off, &cr_off);
+
+	irncr = readl(d->base + cr_off);
+	irnen = readl(d->base + en_off);
+
+	if (!irncr || !irnen || !(irncr & irnen)) {
+		writel(irncr, d->base + cr_off);
+		spin_unlock_irqrestore(&d->dev_lock, flags);
+		return 0;
+	}
+
+	/* disable EOP interrupt for the channel */
+	for_each_set_bit(cid, (const unsigned long *)&irncr, d->chan_nrs) {
+		/* select DMA channel */
+		ldma_update_bits(d, DMA_CS_MASK, cid, DMA_CS);
+		/* Clear EOP interrupt status */
+		writel(readl(d->base + DMA_CIS), d->base + DMA_CIS);
+		/* Disable EOP interrupt */
+		writel(0, d->base + DMA_CIE);
+	}
+
+	/* ACK interrupt */
+	writel(irncr, d->base + cr_off);
+	irnen &= ~irncr;
+	/* Disable interrupt */
+	writel(irnen, d->base + en_off);
+
+	spin_unlock_irqrestore(&d->dev_lock, flags);
+
+	ret = irncr;
+
+	return high ? ret << 32 : ret;
+}
+
+static irqreturn_t hdma_interrupt(int irq, void *dev_id)
+{
+	struct ldma_dev *d = dev_id;
+	struct hdma_chan *chan;
+	u32 cid;
+	unsigned long stat;
+
+	stat = hdma_irq_stat(d, 0) | hdma_irq_stat(d, 1);
+	if (!stat)
+		return IRQ_HANDLED;
+
+	for_each_set_bit(cid, (const unsigned long *)&stat, d->chan_nrs) {
+		chan = (struct hdma_chan *)d->chans[cid].priv;
+		tasklet_schedule(&chan->task);
+	}
+
+	return IRQ_HANDLED;
+}
+
 static int hdma_irq_init(struct ldma_dev *d, struct platform_device *pdev)
 {
-	return 0;
+	if (d->flags & DMA_CHAN_HW_DESC)
+		return 0;
+
+	d->irq = platform_get_irq(pdev, 0);
+	if (d->irq < 0)
+		return d->irq;
+
+	return devm_request_irq(d->dev, d->irq, hdma_interrupt, 0,
+				DRIVER_NAME, d);
 }
 
+/*
+ * Allocate the DMA descriptor list.
+ */
 static int hdma_alloc_chan_resources(struct dma_chan *dma_chan)
 {
 	struct ldma_chan *c = to_ldma_chan(dma_chan);
+	struct hdma_chan *chan = (struct hdma_chan *)c->priv;
 	struct device *dev = c->vchan.chan.device->dev;
+	struct dw4_desc_sw *desc_sw;
+	struct dw4_desc_hw *desc_hw;
+	size_t desc_sz;
+	int i;
 
-	dev_dbg(dev, "allocate channel resource!\n");
-
-	if (c->flags & DMA_HW_DESC) {
+	/* HW allocate DMA descriptors */
+	if (ldma_chan_is_hw_desc(c)) {
 		c->flags |= CHAN_IN_USE;
 		dev_dbg(dev, "desc in hw\n");
+		return 0;
 	}
 
-	return 0;
+	if (!c->desc_cnt) {
+		dev_err(dev, "descriptor count is not set\n");
+		return -EINVAL;
+	}
+
+	desc_sz = c->desc_cnt * sizeof(*desc_hw);
+
+	c->desc_base = kzalloc(desc_sz, GFP_KERNEL);
+	if (!c->desc_base)
+		return -ENOMEM;
+
+	c->desc_phys = dma_map_single(dev, c->desc_base,
+				      desc_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, c->desc_phys)) {
+		dev_err(dev, "dma mapping error for dma desc list\n");
+		goto desc_err;
+	}
+
+	desc_sz = c->desc_cnt * sizeof(*desc_sw);
+	chan->ds = kzalloc(desc_sz, GFP_KERNEL);
+
+	if (!chan->ds)
+		goto desc_err;
+
+	desc_hw = (struct dw4_desc_hw *)c->desc_base;
+	for (i = 0; i < c->desc_cnt; i++) {
+		desc_sw = chan->ds + i;
+		desc_sw->chan = c;
+		desc_sw->desc_hw = desc_hw + i;
+	}
+
+	dev_dbg(dev, "DMA CH: %u, phy addr: 0x%llx, desc cnt: %u\n",
+		c->nr, c->desc_phys, c->desc_cnt);
+
+	ldma_chan_desc_hw_cfg(c, c->desc_phys, c->desc_cnt);
+	ldma_chan_on(c);
+
+	return c->desc_cnt;
+
+desc_err:
+	kfree(c->desc_base);
+	return -EINVAL;
 }
 
 static void hdma_free_chan_resources(struct dma_chan *dma_chan)
 {
 	struct ldma_chan *c = to_ldma_chan(dma_chan);
+	struct hdma_chan *chan = (struct hdma_chan *)c->priv;
+	struct device *dev = c->vchan.chan.device->dev;
+
+	ldma_chan_reset(c);
+
+	/* HW allocate DMA descriptors */
+	if (ldma_chan_is_hw_desc(c)) {
+		c->flags &= ~CHAN_IN_USE;
+		dev_dbg(dev, "%s: desc in hw\n", __func__);
+		return;
+	}
+
+	vchan_free_chan_resources(&c->vchan);
+	kfree(chan->ds);
+	kfree(c->desc_base);
+
+	dev_dbg(dev, "Free DMA channel %u\n", c->nr);
+}
+
+static int
+hdma_slave_config(struct dma_chan *dma_chan, struct dma_slave_config *cfg)
+{
+	struct ldma_chan *c = to_ldma_chan(dma_chan);
+	struct hdma_chan *chan = (struct hdma_chan *)c->priv;
+
+	if (cfg->peripheral_config)
+		memcpy(&chan->cfg, cfg->peripheral_config, sizeof(chan->cfg));
+
+	return 0;
+}
+
+static void hdma_desc_set_own(struct dw4_desc_sw *desc_sw)
+{
+	struct ldma_chan *c = desc_sw->chan;
+	struct dw4_desc_hw *desc_hw;
+	struct device *dev = c->vchan.chan.device->dev;
+
+	desc_hw = desc_sw->desc_hw;
+	desc_hw->dw3 |= DESC_OWN;
 
-	c->flags &= ~CHAN_IN_USE;
+	dma_map_single(dev, desc_hw, sizeof(*desc_hw), DMA_TO_DEVICE);
+}
+
+static void hdma_execute_pending(struct ldma_chan *c)
+{
+	struct virt_dma_desc *vd = NULL;
+	struct dw4_desc_sw *desc_sw;
+
+	do {
+		vd = vchan_next_desc(&c->vchan);
+		if (!vd)
+			break;
+		list_del(&vd->node);
+		desc_sw = to_lgm_dma_dw4_desc(vd);
+		hdma_desc_set_own(desc_sw);
+	} while (vd);
 }
 
 static void hdma_issue_pending(struct dma_chan *dma_chan)
 {
 	struct ldma_chan *c = to_ldma_chan(dma_chan);
+	unsigned long flags;
 
-	ldma_chan_on(c);
+	if (ldma_chan_is_hw_desc(c))
+		return;
+
+	spin_lock_irqsave(&c->vchan.lock, flags);
+	if (vchan_issue_pending(&c->vchan))
+		hdma_execute_pending(c);
+	spin_unlock_irqrestore(&c->vchan.lock, flags);
 }
 
 static enum dma_status
 hdma_tx_status(struct dma_chan *dma_chan, dma_cookie_t cookie,
 	       struct dma_tx_state *txstate)
 {
-	return DMA_COMPLETE;
+	struct ldma_chan *c = to_ldma_chan(dma_chan);
+
+	if (ldma_chan_is_hw_desc(c))
+		return DMA_COMPLETE;
+
+	return dma_cookie_status(dma_chan, cookie, txstate);
+}
+
+/**
+ * Initialize the HW DMA descriptor.
+ * The DMA sideband info is passed in via the sideband info.
+ */
+static void
+hdma_setup_desc(struct ldma_chan *c, struct dw4_desc_hw *desc_hw,
+		dma_addr_t paddr, unsigned int len)
+{
+	struct hdma_chan *chan = (struct hdma_chan *)c->priv;
+	u32 dw3 = 0;
+
+	desc_hw->dw0 = chan->cfg.desc_dw0;
+	desc_hw->dw1 = chan->cfg.desc_dw1;
+	desc_hw->dw2 = lower_32_bits(paddr); /* physical address */
+
+	dw3 = FIELD_PREP(DESC_DATA_LEN, len);
+	dw3 |= FIELD_PREP(DESC_BP_EXT, upper_32_bits(paddr));
+	dw3 |= FIELD_PREP(DESC_SOP, 1);
+	dw3 |= FIELD_PREP(DESC_EOP, 1);
+	desc_hw->dw3 = dw3;
+}
+
+static inline
+bool hdma_desc_empty(struct hdma_chan *chan, unsigned int desc_cnt)
+{
+	return (chan->prep_desc_cnt == desc_cnt);
+}
+
+/**
+ * HW manipulates the DMA descriptors.
+ * Only the descriptor base address and count need to be configured in the DMA.
+ */
+static struct dma_async_tx_descriptor *
+hdma_chan_hw_desc_cfg(struct dma_chan *chan, dma_addr_t desc_base, int desc_num)
+{
+	struct ldma_chan *c = to_ldma_chan(chan);
+	struct ldma_dev *d = chan_to_ldma_dev(c);
+
+	if (!desc_num) {
+		dev_err(d->dev, "Channel %d must allocate descriptor first\n",
+			c->nr);
+		return NULL;
+	}
+
+	if (desc_num > DMA_MAX_DESC_NUM) {
+		dev_err(d->dev, "Channel %d descriptor number out of range %d\n",
+			c->nr, desc_num);
+		return NULL;
+	}
+
+	ldma_chan_desc_hw_cfg(c, desc_base, desc_num);
+
+	c->desc_cnt = desc_num;
+	c->desc_phys = desc_base;
+
+	return NULL;
+}
+
+/**
+ * The HDMA driver is designed to use a 1-to-1 SW/HW descriptor mapping.
+ */
+static struct dma_async_tx_descriptor *
+hdma_prep_slave_sg(struct dma_chan *dma_chan, struct scatterlist *sgl,
+		   unsigned int sglen, enum dma_transfer_direction dir,
+		   unsigned long flags, void *context)
+{
+	struct ldma_chan *c = to_ldma_chan(dma_chan);
+	struct hdma_chan *chan = (struct hdma_chan *)c->priv;
+	struct device *dev = c->vchan.chan.device->dev;
+	struct dw4_desc_hw *desc_hw;
+	struct dw4_desc_sw *desc_sw;
+
+	if (!sgl || sglen < 1) {
+		dev_err(dev, "%s param error!\n", __func__);
+		return NULL;
+	}
+
+	if (ldma_chan_is_hw_desc(c))
+		return hdma_chan_hw_desc_cfg(dma_chan, sgl->dma_address, sglen);
+
+	if (hdma_desc_empty(chan, c->desc_cnt))
+		return NULL;
+
+	desc_sw = chan->ds + chan->prep_idx;
+	chan->prep_idx = (chan->prep_idx + 1) % c->desc_cnt;
+	desc_hw = desc_sw->desc_hw;
+	chan->prep_desc_cnt += 1;
+
+	hdma_setup_desc(c, desc_sw->desc_hw,
+			sg_dma_address(sgl), sg_dma_len(sgl));
+
+	return vchan_tx_prep(&c->vchan, &desc_sw->vd,
+			     DMA_CTRL_ACK);
 }
 
-static void hdma_func_init(struct dma_device *dma_dev)
+static void hdma_func_init(struct ldma_dev *d, struct dma_device *dma_dev)
 {
 	dma_dev->device_alloc_chan_resources = hdma_alloc_chan_resources;
 	dma_dev->device_free_chan_resources = hdma_free_chan_resources;
@@ -127,4 +540,20 @@ static void hdma_func_init(struct dma_device *dma_dev)
 	dma_dev->device_tx_status = hdma_tx_status;
 	dma_dev->device_resume = ldma_resume_chan;
 	dma_dev->device_pause = ldma_pause_chan;
+	dma_dev->device_prep_slave_sg = hdma_prep_slave_sg;
+	dma_dev->device_config = hdma_slave_config;
+
+	dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+				BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+				BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) |
+				BIT(DMA_SLAVE_BUSWIDTH_16_BYTES);
+	dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+				BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+				BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) |
+				BIT(DMA_SLAVE_BUSWIDTH_16_BYTES);
+	if (is_dma_chan_tx(d))
+		dma_dev->directions = BIT(DMA_MEM_TO_DEV);
+	else
+		dma_dev->directions = BIT(DMA_DEV_TO_MEM);
+	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
 }
-- 
2.43.5


^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH 5/5] dmaengine: lgm_dma: Added HDMA RX interrupt handle functions.
  2025-07-30  2:45 [PATCH 1/5] dmaengine: lgm-dma: Move platfrom data to device tree Zhu Yixin
                   ` (2 preceding siblings ...)
  2025-07-30  2:45 ` [PATCH 4/5] dmaengine: lgm-dma: Added HDMA software mode TX function Zhu Yixin
@ 2025-07-30  2:45 ` Zhu Yixin
  2025-07-30  6:19 ` [PATCH 1/5] dmaengine: lgm-dma: Move platfrom data to device tree Krzysztof Kozlowski
  4 siblings, 0 replies; 14+ messages in thread
From: Zhu Yixin @ 2025-07-30  2:45 UTC (permalink / raw)
  To: dmaengine, vkoul; +Cc: jchng, sureshnagaraj, Zhu Yixin

Enhanced tasklet function to handle HDMA RX interrupt.

Signed-off-by: Zhu Yixin <yzhu@maxlinear.com>
---
 drivers/dma/lgm/lgm-hdma.c | 20 ++++++++++++++++----
 1 file changed, 16 insertions(+), 4 deletions(-)

diff --git a/drivers/dma/lgm/lgm-hdma.c b/drivers/dma/lgm/lgm-hdma.c
index 198005b48d59..531d2b2f51b7 100644
--- a/drivers/dma/lgm/lgm-hdma.c
+++ b/drivers/dma/lgm/lgm-hdma.c
@@ -31,6 +31,12 @@
 #define DESC_C			BIT(30)
 #define DESC_OWN		BIT(31)
 
+/* RX sideband information from DMA */
+struct dma_rx_data {
+	unsigned int	data_len;
+	unsigned int	chno;
+};
+
 struct dw4_desc_hw {
 	u32 dw0;
 	u32 dw1;
@@ -68,7 +74,7 @@ static int hdma_chan_init(struct ldma_dev *d, struct ldma_chan *c);
 static int hdma_irq_init(struct ldma_dev *d, struct platform_device *pdev);
 static void hdma_func_init(struct ldma_dev *d, struct dma_device *dma_dev);
 static void hdma_free_chan_resources(struct dma_chan *dma_chan);
-static void dma_tx_chan_complete(struct tasklet_struct *t);
+static void dma_chan_complete(struct tasklet_struct *t);
 
 static inline
 struct dw4_desc_sw *to_lgm_dma_dw4_desc(struct virt_dma_desc *vd)
@@ -140,8 +146,7 @@ static int hdma_chan_init(struct ldma_dev *d, struct ldma_chan *c)
 
 	c->priv = chan;
 	chan->c = c;
-	if (is_dma_chan_tx(d))
-		tasklet_setup(&chan->task, dma_tx_chan_complete);
+	tasklet_setup(&chan->task, dma_chan_complete);
 
 	return 0;
 }
@@ -177,7 +182,7 @@ void hdma_chan_int_enable(struct ldma_dev *d, struct ldma_chan *c)
 	spin_unlock_irqrestore(&d->dev_lock, flags);
 }
 
-static void dma_tx_chan_complete(struct tasklet_struct *t)
+static void dma_chan_complete(struct tasklet_struct *t)
 {
 	struct hdma_chan *chan = from_tasklet(chan, t, task);
 	struct ldma_chan *c = chan->c;
@@ -185,6 +190,7 @@ static void dma_tx_chan_complete(struct tasklet_struct *t)
 
 	/* check how many valid descriptor from DMA */
 	while (chan->prep_desc_cnt > 0) {
+		struct dma_rx_data *data;
 		struct dmaengine_desc_callback cb;
 		struct dma_async_tx_descriptor *tx;
 		struct dw4_desc_sw *desc_sw;
@@ -202,6 +208,12 @@ static void dma_tx_chan_complete(struct tasklet_struct *t)
 		tx = &desc_sw->vd.tx;
 		dmaengine_desc_get_callback(tx, &cb);
 
+		if (is_dma_chan_rx(d)) {
+			data = (struct dma_rx_data *)cb.callback_param;
+			data->data_len = FIELD_GET(DESC_DATA_LEN, desc_hw->dw3);
+			data->chno = c->nr;
+		}
+
 		dma_cookie_complete(tx);
 		chan->comp_idx = (chan->comp_idx + 1) % c->desc_cnt;
 		chan->prep_desc_cnt -= 1;
-- 
2.43.5


^ permalink raw reply related	[flat|nested] 14+ messages in thread

* Re: [PATCH 1/5] dmaengine: lgm-dma: Move platfrom data to device tree
  2025-07-30  2:45 [PATCH 1/5] dmaengine: lgm-dma: Move platfrom data to device tree Zhu Yixin
                   ` (3 preceding siblings ...)
  2025-07-30  2:45 ` [PATCH 5/5] dmaengine: lgm_dma: Added HDMA RX interrupt handle functions Zhu Yixin
@ 2025-07-30  6:19 ` Krzysztof Kozlowski
  2025-07-30  8:43   ` Yi xin Zhu
  4 siblings, 1 reply; 14+ messages in thread
From: Krzysztof Kozlowski @ 2025-07-30  6:19 UTC (permalink / raw)
  To: Zhu Yixin, dmaengine, vkoul; +Cc: jchng, sureshnagaraj

On 30/07/2025 04:45, Zhu Yixin wrote:
> Remove platform data and unified the setting from device tree.
> 
> Signed-off-by: Zhu Yixin <yzhu@maxlinear.com>
> ---
>  .../devicetree/bindings/dma/intel,ldma.yaml   |  67 ++++-
>  drivers/dma/lgm/lgm-dma.c                     | 242 +++++++-----------

Please run scripts/checkpatch.pl on the patches and fix reported
warnings. After that, run also 'scripts/checkpatch.pl --strict' on the
patches and (probably) fix more warnings. Some warnings can be ignored,
especially from --strict run, but the code here looks like it needs a
fix. Feel free to get in touch if the warning is not clear.


>  2 files changed, 144 insertions(+), 165 deletions(-)
> 
> diff --git a/Documentation/devicetree/bindings/dma/intel,ldma.yaml b/Documentation/devicetree/bindings/dma/intel,ldma.yaml
> index d6bb553a2c6f..59f928297613 100644
> --- a/Documentation/devicetree/bindings/dma/intel,ldma.yaml
> +++ b/Documentation/devicetree/bindings/dma/intel,ldma.yaml
> @@ -7,8 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
>  title: Lightning Mountain centralized DMA controllers.
>  
>  maintainers:
> -  - chuanhua.lei@intel.com
> -  - mallikarjunax.reddy@intel.com
> +  - yzhu@maxlinear.com
>  
>  allOf:
>    - $ref: dma-controller.yaml#
> @@ -16,14 +15,7 @@ allOf:
>  properties:
>    compatible:
>      enum:
> -      - intel,lgm-cdma
> -      - intel,lgm-dma2tx
> -      - intel,lgm-dma1rx
> -      - intel,lgm-dma1tx
> -      - intel,lgm-dma0tx
> -      - intel,lgm-dma3
> -      - intel,lgm-toe-dma30
> -      - intel,lgm-toe-dma31
> +      - intel,lgm-ldma

Nothing explains why you break the ABI.

<form letter>
Please use scripts/get_maintainers.pl to get a list of necessary people
and lists to CC. It might happen, that command when run on an older
kernel, gives you outdated entries. Therefore please be sure you base
your patches on recent Linux kernel.

Tools like b4 or scripts/get_maintainer.pl provide you proper list of
people, so fix your workflow. Tools might also fail if you work on some
ancient tree (don't, instead use mainline) or work on fork of kernel
(don't, instead use mainline). Just use b4 and everything should be
fine, although remember about `b4 prep --auto-to-cc` if you added new
patches to the patchset.

You missed at least devicetree list (maybe more), so this won't be
tested by automated tooling. Performing review on untested code might be
a waste of time.

Please kindly resend and include all necessary To/Cc entries.
</form letter>



Best regards,
Krzysztof

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH 3/5] dmaengine: lgm-dma: split legacy DMA and HDMA functions
  2025-07-30  2:45 ` [PATCH 3/5] dmaengine: lgm-dma: split legacy DMA and HDMA functions Zhu Yixin
@ 2025-07-30  6:20   ` Krzysztof Kozlowski
  2025-07-30  8:52     ` Yi xin Zhu
  0 siblings, 1 reply; 14+ messages in thread
From: Krzysztof Kozlowski @ 2025-07-30  6:20 UTC (permalink / raw)
  To: Zhu Yixin, dmaengine, vkoul; +Cc: jchng, sureshnagaraj

On 30/07/2025 04:45, Zhu Yixin wrote:
> Move legacy DMA functions into lgm-cdma.c.
> Move HDMA functions into lgm-hdma.c
> Keep the driver flow and general functions in lgm-dma.c
> 
> Signed-off-by: Zhu Yixin <yzhu@maxlinear.com>
> ---
>  .../devicetree/bindings/dma/intel,ldma.yaml   |   7 +-
>  drivers/dma/lgm/Makefile                      |   2 +-
>  drivers/dma/lgm/lgm-cdma.c                    | 492 ++++++++++
>  drivers/dma/lgm/lgm-dma.c                     | 839 ++----------------
>  drivers/dma/lgm/lgm-dma.h                     | 278 ++++++
>  drivers/dma/lgm/lgm-hdma.c                    | 130 +++
>  6 files changed, 961 insertions(+), 787 deletions(-)
>  create mode 100644 drivers/dma/lgm/lgm-cdma.c
>  create mode 100644 drivers/dma/lgm/lgm-dma.h
>  create mode 100644 drivers/dma/lgm/lgm-hdma.c
> 
> diff --git a/Documentation/devicetree/bindings/dma/intel,ldma.yaml b/Documentation/devicetree/bindings/dma/intel,ldma.yaml
> index 59f928297613..f91d849edc4c 100644
> --- a/Documentation/devicetree/bindings/dma/intel,ldma.yaml
> +++ b/Documentation/devicetree/bindings/dma/intel,ldma.yaml
> @@ -15,7 +15,8 @@ allOf:
>  properties:
>    compatible:
>      enum:
> -      - intel,lgm-ldma
> +      - intel,lgm-cdma
> +      - intel,lgm-hdma

NAK, stop random changes to compatibles.

Please run scripts/checkpatch.pl on the patches and fix reported
warnings. After that, run also 'scripts/checkpatch.pl --strict' on the
patches and (probably) fix more warnings. Some warnings can be ignored,
especially from --strict run, but the code here looks like it needs a
fix. Feel free to get in touch if the warning is not clear.

<form letter>
Please use scripts/get_maintainers.pl to get a list of necessary people
and lists to CC. It might happen, that command when run on an older
kernel, gives you outdated entries. Therefore please be sure you base
your patches on recent Linux kernel.

Tools like b4 or scripts/get_maintainer.pl provide you proper list of
people, so fix your workflow. Tools might also fail if you work on some
ancient tree (don't, instead use mainline) or work on fork of kernel
(don't, instead use mainline). Just use b4 and everything should be
fine, although remember about `b4 prep --auto-to-cc` if you added new
patches to the patchset.

You missed at least devicetree list (maybe more), so this won't be
tested by automated tooling. Performing review on untested code might be
a waste of time.

Please kindly resend and include all necessary To/Cc entries.
</form letter>


Best regards,
Krzysztof

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH 4/5] dmaengine: lgm-dma: Added HDMA software mode TX function.
  2025-07-30  2:45 ` [PATCH 4/5] dmaengine: lgm-dma: Added HDMA software mode TX function Zhu Yixin
@ 2025-07-30  6:21   ` Krzysztof Kozlowski
  2025-07-30  8:59     ` Yi xin Zhu
  2025-07-30 16:17   ` kernel test robot
  1 sibling, 1 reply; 14+ messages in thread
From: Krzysztof Kozlowski @ 2025-07-30  6:21 UTC (permalink / raw)
  To: Zhu Yixin, dmaengine, vkoul; +Cc: jchng, sureshnagaraj

On 30/07/2025 04:45, Zhu Yixin wrote:
> Added HDMA software mode to handle DMA TX functions.
> 
> Signed-off-by: Zhu Yixin <yzhu@maxlinear.com>
> ---
>  .../devicetree/bindings/dma/intel,ldma.yaml   |   6 +
>  drivers/dma/lgm/lgm-cdma.c                    |  42 +-
>  drivers/dma/lgm/lgm-dma.c                     | 189 +++++---
>  drivers/dma/lgm/lgm-dma.h                     |  31 +-
>  drivers/dma/lgm/lgm-hdma.c                    | 453 +++++++++++++++++-
>  5 files changed, 608 insertions(+), 113 deletions(-)
> 
> diff --git a/Documentation/devicetree/bindings/dma/intel,ldma.yaml b/Documentation/devicetree/bindings/dma/intel,ldma.yaml
> index f91d849edc4c..e58f1d13aee3 100644
> --- a/Documentation/devicetree/bindings/dma/intel,ldma.yaml
> +++ b/Documentation/devicetree/bindings/dma/intel,ldma.yaml
> @@ -118,6 +118,11 @@ properties:
>      description:
>        Name of the DMA.
>  
> +  intel,dma-hw-desc:
> +    type: boolean
> +    description:
> +      DMA descriptor manupulated by Hardware.
Your commit msg explains nothing, so basically: no. Write proper commit
msgs documenting hardware, not software.

Please run scripts/checkpatch.pl on the patches and fix reported
warnings. After that, run also 'scripts/checkpatch.pl --strict' on the
patches and (probably) fix more warnings. Some warnings can be ignored,
especially from --strict run, but the code here looks like it needs a
fix. Feel free to get in touch if the warning is not clear.

<form letter>
Please use scripts/get_maintainers.pl to get a list of necessary people
and lists to CC. It might happen, that command when run on an older
kernel, gives you outdated entries. Therefore please be sure you base
your patches on recent Linux kernel.

Tools like b4 or scripts/get_maintainer.pl provide you proper list of
people, so fix your workflow. Tools might also fail if you work on some
ancient tree (don't, instead use mainline) or work on fork of kernel
(don't, instead use mainline). Just use b4 and everything should be
fine, although remember about `b4 prep --auto-to-cc` if you added new
patches to the patchset.

You missed at least devicetree list (maybe more), so this won't be
tested by automated tooling. Performing review on untested code might be
a waste of time.

Please kindly resend and include all necessary To/Cc entries.
</form letter>

Best regards,
Krzysztof

^ permalink raw reply	[flat|nested] 14+ messages in thread

* RE: [PATCH 1/5] dmaengine: lgm-dma: Move platfrom data to device tree
  2025-07-30  6:19 ` [PATCH 1/5] dmaengine: lgm-dma: Move platfrom data to device tree Krzysztof Kozlowski
@ 2025-07-30  8:43   ` Yi xin Zhu
  2025-07-30  8:54     ` Krzysztof Kozlowski
  0 siblings, 1 reply; 14+ messages in thread
From: Yi xin Zhu @ 2025-07-30  8:43 UTC (permalink / raw)
  To: Krzysztof Kozlowski, dmaengine@vger.kernel.org, vkoul@kernel.org
  Cc: Jack Ping Chng, Suresh Nagaraj

Hi Krzysztof,

On 30/07/2025 14:20, Krzysztof Kozlowski wrote:
> Please run scripts/checkpatch.pl on the patches and fix reported warnings.
> After that, run also 'scripts/checkpatch.pl --strict' on the patches and
> (probably) fix more warnings. Some warnings can be ignored, especially from -
> -strict run, but the code here looks like it needs a fix. Feel free to get in touch if
> the warning is not clear.

I'll run checkpatch.pl --strict on patches and submit an updated version.

On 30/07/2025 14:20, Krzysztof Kozlowski wrote:
> Nothing explains why you break the ABI.
> 
> <form letter>
> Please use scripts/get_maintainers.pl to get a list of necessary people and lists
> to CC. It might happen, that command when run on an older kernel, gives you
> outdated entries. Therefore please be sure you base your patches on recent
> Linux kernel.
> 
> Tools like b4 or scripts/get_maintainer.pl provide you proper list of people, so
> fix your workflow. Tools might also fail if you work on some ancient tree
> (don't, instead use mainline) or work on fork of kernel (don't, instead use
> mainline). Just use b4 and everything should be fine, although remember
> about `b4 prep --auto-to-cc` if you added new patches to the patchset.
> 
> You missed at least devicetree list (maybe more), so this won't be tested by
> automated tooling. Performing review on untested code might be a waste of
> time.
> 
> Please kindly resend and include all necessary To/Cc entries.
> </form letter>
> 
> 
> 
> Best regards,
> Krzysztof

I'll include detailed comments to explain why the compatible change is required. 
And correct the mailing list in the next patch submission.

Best regards,
Yixin

^ permalink raw reply	[flat|nested] 14+ messages in thread

* RE: [PATCH 3/5] dmaengine: lgm-dma: split legacy DMA and HDMA functions
  2025-07-30  6:20   ` Krzysztof Kozlowski
@ 2025-07-30  8:52     ` Yi xin Zhu
  0 siblings, 0 replies; 14+ messages in thread
From: Yi xin Zhu @ 2025-07-30  8:52 UTC (permalink / raw)
  To: Krzysztof Kozlowski, dmaengine@vger.kernel.org, vkoul@kernel.org
  Cc: Jack Ping Chng, Suresh Nagaraj

Hi Krzysztof,

On 30/07/2025 14:21, Krzysztof Kozlowski wrote:
> NAK, stop random changes to compatibles.
> 
> Please run scripts/checkpatch.pl on the patches and fix reported warnings.
> After that, run also 'scripts/checkpatch.pl --strict' on the patches and
> (probably) fix more warnings. Some warnings can be ignored, especially from -
> -strict run, but the code here looks like it needs a fix. Feel free to get in touch if
> the warning is not clear.
> 
> <form letter>
> Please use scripts/get_maintainers.pl to get a list of necessary people and lists
> to CC. It might happen, that command when run on an older kernel, gives you
> outdated entries. Therefore please be sure you base your patches on recent
> Linux kernel.
> 
> Tools like b4 or scripts/get_maintainer.pl provide you proper list of people, so
> fix your workflow. Tools might also fail if you work on some ancient tree
> (don't, instead use mainline) or work on fork of kernel (don't, instead use
> mainline). Just use b4 and everything should be fine, although remember
> about `b4 prep --auto-to-cc` if you added new patches to the patchset.
> 
> You missed at least devicetree list (maybe more), so this won't be tested by
> automated tooling. Performing review on untested code might be a waste of
> time.
> 
> Please kindly resend and include all necessary To/Cc entries.
> </form letter>
> 
> 
> Best regards,
> Krzysztof

I'll re-organize the patches to avoid random changes to compatibles and submit an updated version.

Best regards,
Yixin

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH 1/5] dmaengine: lgm-dma: Move platfrom data to device tree
  2025-07-30  8:43   ` Yi xin Zhu
@ 2025-07-30  8:54     ` Krzysztof Kozlowski
  2025-07-31  5:44       ` Yi xin Zhu
  0 siblings, 1 reply; 14+ messages in thread
From: Krzysztof Kozlowski @ 2025-07-30  8:54 UTC (permalink / raw)
  To: Yi xin Zhu, dmaengine@vger.kernel.org, vkoul@kernel.org
  Cc: Jack Ping Chng, Suresh Nagaraj

On 30/07/2025 10:43, Yi xin Zhu wrote:
>>
>>
>>
>> Best regards,
>> Krzysztof
> 
> I'll include detailed comments to explain why the compatible change is required. 

It is unlikely you will come with good arguments for breaking ABI.
Anyway, if you keep going that way, explain in terms of ABI impact.

See also ABI docs, writing bindings and submitting patches.



Best regards,
Krzysztof

^ permalink raw reply	[flat|nested] 14+ messages in thread

* RE: [PATCH 4/5] dmaengine: lgm-dma: Added HDMA software mode TX function.
  2025-07-30  6:21   ` Krzysztof Kozlowski
@ 2025-07-30  8:59     ` Yi xin Zhu
  0 siblings, 0 replies; 14+ messages in thread
From: Yi xin Zhu @ 2025-07-30  8:59 UTC (permalink / raw)
  To: Krzysztof Kozlowski, dmaengine@vger.kernel.org, vkoul@kernel.org
  Cc: Jack Ping Chng, Suresh Nagaraj

Hi Krzysztof,

On 30/07/2025 14:21, Krzysztof Kozlowski wrote:
> Your commit msg explains nothing, so basically: no. Write proper commit msgs
> documenting hardware, not software.
> 
> Please run scripts/checkpatch.pl on the patches and fix reported warnings.
> After that, run also 'scripts/checkpatch.pl --strict' on the patches and
> (probably) fix more warnings. Some warnings can be ignored, especially from -
> -strict run, but the code here looks like it needs a fix. Feel free to get in touch if
> the warning is not clear.
> 
> <form letter>
> Please use scripts/get_maintainers.pl to get a list of necessary people and lists
> to CC. It might happen, that command when run on an older kernel, gives you
> outdated entries. Therefore please be sure you base your patches on recent
> Linux kernel.
> 
> Tools like b4 or scripts/get_maintainer.pl provide you proper list of people, so
> fix your workflow. Tools might also fail if you work on some ancient tree
> (don't, instead use mainline) or work on fork of kernel (don't, instead use
> mainline). Just use b4 and everything should be fine, although remember
> about `b4 prep --auto-to-cc` if you added new patches to the patchset.
> 
> You missed at least devicetree list (maybe more), so this won't be tested by
> automated tooling. Performing review on untested code might be a waste of
> time.
> 
> Please kindly resend and include all necessary To/Cc entries.
> </form letter>
> 
> Best regards,
> Krzysztof

I'll update the commit message and write proper descriptions to explain the patch functions.

Best regards,
Yixin


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH 4/5] dmaengine: lgm-dma: Added HDMA software mode TX function.
  2025-07-30  2:45 ` [PATCH 4/5] dmaengine: lgm-dma: Added HDMA software mode TX function Zhu Yixin
  2025-07-30  6:21   ` Krzysztof Kozlowski
@ 2025-07-30 16:17   ` kernel test robot
  1 sibling, 0 replies; 14+ messages in thread
From: kernel test robot @ 2025-07-30 16:17 UTC (permalink / raw)
  To: Zhu Yixin, dmaengine, vkoul
  Cc: oe-kbuild-all, jchng, sureshnagaraj, Zhu Yixin

Hi Zhu,

kernel test robot noticed the following build warnings:

[auto build test WARNING on vkoul-dmaengine/next]
[also build test WARNING on linus/master v6.16 next-20250730]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Zhu-Yixin/dmaengine-lgm-dma-Correct-ORRC-MAX-counter-value/20250730-105748
base:   https://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine.git next
patch link:    https://lore.kernel.org/r/20250730024547.3160871-4-yzhu%40maxlinear.com
patch subject: [PATCH 4/5] dmaengine: lgm-dma: Added HDMA software mode TX function.
config: i386-buildonly-randconfig-003-20250730 (https://download.01.org/0day-ci/archive/20250730/202507302344.XEgGylg3-lkp@intel.com/config)
compiler: gcc-12 (Debian 12.2.0-14+deb12u1) 12.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250730/202507302344.XEgGylg3-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202507302344.XEgGylg3-lkp@intel.com/

All warnings (new ones prefixed by >>):

   drivers/dma/lgm/lgm-hdma.c: In function 'hdma_irq_stat':
>> drivers/dma/lgm/lgm-hdma.c:253:27: warning: left shift count >= width of type [-Wshift-count-overflow]
     253 |         return high ? ret << 32 : ret;
         |                           ^~
   In file included from include/linux/device.h:15,
                    from include/linux/dma-mapping.h:5,
                    from drivers/dma/lgm/lgm-hdma.c:10:
   drivers/dma/lgm/lgm-hdma.c: In function 'hdma_alloc_chan_resources':
>> drivers/dma/lgm/lgm-hdma.c:339:22: warning: format '%llx' expects argument of type 'long long unsigned int', but argument 5 has type 'dma_addr_t' {aka 'unsigned int'} [-Wformat=]
     339 |         dev_dbg(dev, "DMA CH: %u, phy addr: 0x%llx, desc cnt: %u\n",
         |                      ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   include/linux/dev_printk.h:139:49: note: in definition of macro 'dev_no_printk'
     139 |                         _dev_printk(level, dev, fmt, ##__VA_ARGS__);    \
         |                                                 ^~~
   include/linux/dev_printk.h:171:40: note: in expansion of macro 'dev_fmt'
     171 |         dev_no_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
         |                                        ^~~~~~~
   drivers/dma/lgm/lgm-hdma.c:339:9: note: in expansion of macro 'dev_dbg'
     339 |         dev_dbg(dev, "DMA CH: %u, phy addr: 0x%llx, desc cnt: %u\n",
         |         ^~~~~~~
   drivers/dma/lgm/lgm-hdma.c:339:50: note: format string is defined here
     339 |         dev_dbg(dev, "DMA CH: %u, phy addr: 0x%llx, desc cnt: %u\n",
         |                                               ~~~^
         |                                                  |
         |                                                  long long unsigned int
         |                                               %x
   drivers/dma/lgm/lgm-hdma.c: In function 'hdma_prep_slave_sg':
>> drivers/dma/lgm/lgm-hdma.c:508:29: warning: variable 'desc_hw' set but not used [-Wunused-but-set-variable]
     508 |         struct dw4_desc_hw *desc_hw;
         |                             ^~~~~~~


vim +253 drivers/dma/lgm/lgm-hdma.c

   213	
   214	static unsigned long hdma_irq_stat(struct ldma_dev *d, int high)
   215	{
   216		u32 irnen, irncr, en_off, cr_off, cid;
   217		unsigned long flags;
   218		unsigned long ret;
   219	
   220		spin_lock_irqsave(&d->dev_lock, flags);
   221	
   222		hdma_get_irq_off(high, &en_off, &cr_off);
   223	
   224		irncr = readl(d->base + cr_off);
   225		irnen = readl(d->base + en_off);
   226	
   227		if (!irncr || !irnen || !(irncr & irnen)) {
   228			writel(irncr, d->base + cr_off);
   229			spin_unlock_irqrestore(&d->dev_lock, flags);
   230			return 0;
   231		}
   232	
   233		/* disable EOP interrupt for the channel */
   234		for_each_set_bit(cid, (const unsigned long *)&irncr, d->chan_nrs) {
   235			/* select DMA channel */
   236			ldma_update_bits(d, DMA_CS_MASK, cid, DMA_CS);
   237			/* Clear EOP interrupt status */
   238			writel(readl(d->base + DMA_CIS), d->base + DMA_CIS);
   239			/* Disable EOP interrupt */
   240			writel(0, d->base + DMA_CIE);
   241		}
   242	
   243		/* ACK interrupt */
   244		writel(irncr, d->base + cr_off);
   245		irnen &= ~irncr;
   246		/* Disable interrupt */
   247		writel(irnen, d->base + en_off);
   248	
   249		spin_unlock_irqrestore(&d->dev_lock, flags);
   250	
   251		ret = irncr;
   252	
 > 253		return high ? ret << 32 : ret;
   254	}
   255	
   256	static irqreturn_t hdma_interrupt(int irq, void *dev_id)
   257	{
   258		struct ldma_dev *d = dev_id;
   259		struct hdma_chan *chan;
   260		u32 cid;
   261		unsigned long stat;
   262	
   263		stat = hdma_irq_stat(d, 0) | hdma_irq_stat(d, 1);
   264		if (!stat)
   265			return IRQ_HANDLED;
   266	
   267		for_each_set_bit(cid, (const unsigned long *)&stat, d->chan_nrs) {
   268			chan = (struct hdma_chan *)d->chans[cid].priv;
   269			tasklet_schedule(&chan->task);
   270		}
   271	
   272		return IRQ_HANDLED;
   273	}
   274	
   275	static int hdma_irq_init(struct ldma_dev *d, struct platform_device *pdev)
   276	{
   277		if (d->flags & DMA_CHAN_HW_DESC)
   278			return 0;
   279	
   280		d->irq = platform_get_irq(pdev, 0);
   281		if (d->irq < 0)
   282			return d->irq;
   283	
   284		return devm_request_irq(d->dev, d->irq, hdma_interrupt, 0,
   285					DRIVER_NAME, d);
   286	}
   287	
   288	/**
   289	 * Allocate DMA descriptor list
   290	 */
   291	static int hdma_alloc_chan_resources(struct dma_chan *dma_chan)
   292	{
   293		struct ldma_chan *c = to_ldma_chan(dma_chan);
   294		struct hdma_chan *chan = (struct hdma_chan *)c->priv;
   295		struct device *dev = c->vchan.chan.device->dev;
   296		struct dw4_desc_sw *desc_sw;
   297		struct dw4_desc_hw *desc_hw;
   298		size_t desc_sz;
   299		int i;
   300	
   301		/* HW allocate DMA descriptors */
   302		if (ldma_chan_is_hw_desc(c)) {
   303			c->flags |= CHAN_IN_USE;
   304			dev_dbg(dev, "desc in hw\n");
   305			return 0;
   306		}
   307	
   308		if (!c->desc_cnt) {
   309			dev_err(dev, "descriptor count is not set\n");
   310			return -EINVAL;
   311		}
   312	
   313		desc_sz = c->desc_cnt * sizeof(*desc_hw);
   314	
   315		c->desc_base = kzalloc(desc_sz, GFP_KERNEL);
   316		if (!c->desc_base)
   317			return -ENOMEM;
   318	
   319		c->desc_phys = dma_map_single(dev, c->desc_base,
   320					      desc_sz, DMA_TO_DEVICE);
   321		if (dma_mapping_error(dev, c->desc_phys)) {
   322			dev_err(dev, "dma mapping error for dma desc list\n");
   323			goto desc_err;
   324		}
   325	
   326		desc_sz = c->desc_cnt * sizeof(*desc_sw);
   327		chan->ds = kzalloc(desc_sz, GFP_KERNEL);
   328	
   329		if (!chan->ds)
   330			goto desc_err;
   331	
   332		desc_hw = (struct dw4_desc_hw *)c->desc_base;
   333		for (i = 0; i < c->desc_cnt; i++) {
   334			desc_sw = chan->ds + i;
   335			desc_sw->chan = c;
   336			desc_sw->desc_hw = desc_hw + i;
   337		}
   338	
 > 339		dev_dbg(dev, "DMA CH: %u, phy addr: 0x%llx, desc cnt: %u\n",
   340			c->nr, c->desc_phys, c->desc_cnt);
   341	
   342		ldma_chan_desc_hw_cfg(c, c->desc_phys, c->desc_cnt);
   343		ldma_chan_on(c);
   344	
   345		return c->desc_cnt;
   346	
   347	desc_err:
   348		kfree(c->desc_base);
   349		return -EINVAL;
   350	}
   351	

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

^ permalink raw reply	[flat|nested] 14+ messages in thread

* RE: [PATCH 1/5] dmaengine: lgm-dma: Move platfrom data to device tree
  2025-07-30  8:54     ` Krzysztof Kozlowski
@ 2025-07-31  5:44       ` Yi xin Zhu
  0 siblings, 0 replies; 14+ messages in thread
From: Yi xin Zhu @ 2025-07-31  5:44 UTC (permalink / raw)
  To: Krzysztof Kozlowski, dmaengine@vger.kernel.org, vkoul@kernel.org
  Cc: Jack Ping Chng, Suresh Nagaraj

Hi Krzysztof, 

On 30/07/2025 16:54, Krzysztof Kozlowski wrote:
> It is unlikely you will come with good arguments for breaking ABI.
> Anyway, if you keep going that way, explain in terms of ABI impact.
> 
> See also ABI docs, writing bindings and submitting patches.
> 
> 
> 
> Best regards,
> Krzysztof

Okay,  I'll revert that part of the changes and re-submit the patch.

Best regards,
Yixin 

^ permalink raw reply	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2025-07-31  5:44 UTC | newest]

Thread overview: 14+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2025-07-30  2:45 [PATCH 1/5] dmaengine: lgm-dma: Move platfrom data to device tree Zhu Yixin
2025-07-30  2:45 ` [PATCH 2/5] dmaengine: lgm-dma: Correct ORRC MAX counter value Zhu Yixin
2025-07-30  2:45 ` [PATCH 3/5] dmaengine: lgm-dma: split legacy DMA and HDMA functions Zhu Yixin
2025-07-30  6:20   ` Krzysztof Kozlowski
2025-07-30  8:52     ` Yi xin Zhu
2025-07-30  2:45 ` [PATCH 4/5] dmaengine: lgm-dma: Added HDMA software mode TX function Zhu Yixin
2025-07-30  6:21   ` Krzysztof Kozlowski
2025-07-30  8:59     ` Yi xin Zhu
2025-07-30 16:17   ` kernel test robot
2025-07-30  2:45 ` [PATCH 5/5] dmaengine: lgm_dma: Added HDMA RX interrupt handle functions Zhu Yixin
2025-07-30  6:19 ` [PATCH 1/5] dmaengine: lgm-dma: Move platfrom data to device tree Krzysztof Kozlowski
2025-07-30  8:43   ` Yi xin Zhu
2025-07-30  8:54     ` Krzysztof Kozlowski
2025-07-31  5:44       ` Yi xin Zhu

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox