public inbox for dmaengine@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH 0/2] dmaengine: dma-axi-dmac: Some memory related fixes
@ 2026-03-26 13:37 Nuno Sá via B4 Relay
  2026-03-26 13:37 ` [PATCH 1/2] dmaengine: dma-axi-dmac: Defer freeing DMA descriptors Nuno Sá via B4 Relay
  2026-03-26 13:37 ` [PATCH 2/2] dmaengine: dma-axi-dmac: fix use-after-free on unbind Nuno Sá via B4 Relay
  0 siblings, 2 replies; 4+ messages in thread
From: Nuno Sá via B4 Relay @ 2026-03-26 13:37 UTC (permalink / raw)
  To: dmaengine, linux-kernel
  Cc: Lars-Peter Clausen, Vinod Koul, Frank Li, Eliza Balas

This series aims to fix some issues with axi-dmac driver related to
memory:

1. When testing the IP on a Microblaze-based platform, we get vmalloc'ed
   memory when allocating DMAC descriptors, which will lead to BUG() when
   releasing them. More on this in the commit message.
2. The second is related to a well-known issue with devm allocations
   of reference counted objects in a provider-consumer relationship.
   It seems to be a known issue in dmaengine, but fix it in the AXI-DMAC
   driver by properly implementing the device_release() callback.

Didn't add any fixes tag because for 1), it was not an issue when the
driver was first implemented (it just triggered very recently) and for 2),
because it seems like a well-known issue. Anyway, for 2) it seems more
reasonable to have a fixes tag (IMO), so tell me if you want me to add one.

Also note that my signoff on Eliza's patch is merely because I'm sending
the patch on her behalf. I had no part in the solution (I just improved
the comments and commit message a bit).

Signed-off-by: Nuno Sá <nuno.sa@analog.com>
---
Eliza Balas (1):
      dmaengine: dma-axi-dmac: Defer freeing DMA descriptors

Nuno Sá (1):
      dmaengine: dma-axi-dmac: fix use-after-free on unbind

 drivers/dma/dma-axi-dmac.c | 95 +++++++++++++++++++++++++++++++++++++---------
 1 file changed, 78 insertions(+), 17 deletions(-)
---
base-commit: b7560798466a07d9c3fb011698e92c335ab28baf
change-id: 20260325-dma-dmac-handle-vunmap-84a06df7d133
--

Thanks!
- Nuno Sá



^ permalink raw reply	[flat|nested] 4+ messages in thread

* [PATCH 1/2] dmaengine: dma-axi-dmac: Defer freeing DMA descriptors
  2026-03-26 13:37 [PATCH 0/2] dmaengine: dma-axi-dmac: Some memory related fixes Nuno Sá via B4 Relay
@ 2026-03-26 13:37 ` Nuno Sá via B4 Relay
  2026-03-26 15:14   ` Nuno Sá
  2026-03-26 13:37 ` [PATCH 2/2] dmaengine: dma-axi-dmac: fix use-after-free on unbind Nuno Sá via B4 Relay
  1 sibling, 1 reply; 4+ messages in thread
From: Nuno Sá via B4 Relay @ 2026-03-26 13:37 UTC (permalink / raw)
  To: dmaengine, linux-kernel
  Cc: Lars-Peter Clausen, Vinod Koul, Frank Li, Eliza Balas

From: Eliza Balas <eliza.balas@analog.com>

This IP core can be used in architectures (like Microblaze) where DMA
descriptors are allocated with vmalloc(). Hence, given that freeing the
descriptors happens in softirq context, vunmap() will BUG().

To solve the above, we set up a work item during allocation of the
descriptors and schedule it from softirq context. Hence, the actual
freeing happens in threaded context.

Signed-off-by: Eliza Balas <eliza.balas@analog.com>
Signed-off-by: Nuno Sá <nuno.sa@analog.com>
---
 drivers/dma/dma-axi-dmac.c | 48 +++++++++++++++++++++++++++++++++-------------
 1 file changed, 35 insertions(+), 13 deletions(-)

diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 45c2c8e4bc45..df2668064ea2 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -133,6 +133,8 @@ struct axi_dmac_desc {
 	struct virt_dma_desc vdesc;
 	struct axi_dmac_chan *chan;
 
+	struct work_struct sched_work;
+
 	bool cyclic;
 	bool cyclic_eot;
 	bool have_partial_xfer;
@@ -650,6 +652,26 @@ static void axi_dmac_issue_pending(struct dma_chan *c)
 	spin_unlock_irqrestore(&chan->vchan.lock, flags);
 }
 
+static void axi_dmac_free_desc(struct axi_dmac_desc *desc)
+{
+	struct axi_dmac *dmac = chan_to_axi_dmac(desc->chan);
+	struct device *dev = dmac->dma_dev.dev;
+	struct axi_dmac_hw_desc *hw = desc->sg[0].hw;
+	dma_addr_t hw_phys = desc->sg[0].hw_phys;
+
+	dma_free_coherent(dev, PAGE_ALIGN(desc->num_sgs * sizeof(*hw)),
+			  hw, hw_phys);
+	kfree(desc);
+}
+
+static void axi_dmac_free_desc_schedule_work(struct work_struct *work)
+{
+	struct axi_dmac_desc *desc = container_of(work, struct axi_dmac_desc,
+						  sched_work);
+
+	axi_dmac_free_desc(desc);
+}
+
 static struct axi_dmac_desc *
 axi_dmac_alloc_desc(struct axi_dmac_chan *chan, unsigned int num_sgs)
 {
@@ -687,21 +709,18 @@ axi_dmac_alloc_desc(struct axi_dmac_chan *chan, unsigned int num_sgs)
 	/* The last hardware descriptor will trigger an interrupt */
 	desc->sg[num_sgs - 1].hw->flags = AXI_DMAC_HW_FLAG_LAST | AXI_DMAC_HW_FLAG_IRQ;
 
+	/*
+	 * We need to setup a work item because this IP can be used on archs
+	 * that rely on vmalloced memory for descriptors. And given that freeing
+	 * the descriptors happens in softirq context, vunmap() will BUG().
+	 * Hence, setup the worker so that we can queue it and free the
+	 * descriptor in threaded context.
+	 */
+	INIT_WORK(&desc->sched_work, axi_dmac_free_desc_schedule_work);
+
 	return desc;
 }
 
-static void axi_dmac_free_desc(struct axi_dmac_desc *desc)
-{
-	struct axi_dmac *dmac = chan_to_axi_dmac(desc->chan);
-	struct device *dev = dmac->dma_dev.dev;
-	struct axi_dmac_hw_desc *hw = desc->sg[0].hw;
-	dma_addr_t hw_phys = desc->sg[0].hw_phys;
-
-	dma_free_coherent(dev, PAGE_ALIGN(desc->num_sgs * sizeof(*hw)),
-			  hw, hw_phys);
-	kfree(desc);
-}
-
 static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
 	enum dma_transfer_direction direction, dma_addr_t addr,
 	unsigned int num_periods, unsigned int period_len,
@@ -942,7 +961,10 @@ static void axi_dmac_free_chan_resources(struct dma_chan *c)
 
 static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
 {
-	axi_dmac_free_desc(to_axi_dmac_desc(vdesc));
+	struct axi_dmac_desc *desc = to_axi_dmac_desc(vdesc);
+
+	/* See the comment in axi_dmac_alloc_desc() for the why! */
+	schedule_work(&desc->sched_work);
 }
 
 static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)

-- 
2.53.0



^ permalink raw reply related	[flat|nested] 4+ messages in thread

* [PATCH 2/2] dmaengine: dma-axi-dmac: fix use-after-free on unbind
  2026-03-26 13:37 [PATCH 0/2] dmaengine: dma-axi-dmac: Some memory related fixes Nuno Sá via B4 Relay
  2026-03-26 13:37 ` [PATCH 1/2] dmaengine: dma-axi-dmac: Defer freeing DMA descriptors Nuno Sá via B4 Relay
@ 2026-03-26 13:37 ` Nuno Sá via B4 Relay
  1 sibling, 0 replies; 4+ messages in thread
From: Nuno Sá via B4 Relay @ 2026-03-26 13:37 UTC (permalink / raw)
  To: dmaengine, linux-kernel; +Cc: Lars-Peter Clausen, Vinod Koul, Frank Li

From: Nuno Sá <nuno.sa@analog.com>

The DMA device lifetime can extend beyond the platform driver unbind if
DMA channels are still referenced by client drivers. This leads to
use-after-free when the devm-managed memory is freed on unbind but the
DMA device callbacks still access it.

Fix this by:
 - Allocating axi_dmac with kzalloc_obj() instead of devm_kzalloc() so
its lifetime is not tied to the platform device.
 - Implementing the device_release callback so that we can free
the object when the reference count gets to 0 (no users).
 - Adding an 'unbound' flag protected by the vchan lock that is set
during driver removal, preventing MMIO accesses after the device has been
unbound.

Signed-off-by: Nuno Sá <nuno.sa@analog.com>
---
 drivers/dma/dma-axi-dmac.c | 47 ++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 43 insertions(+), 4 deletions(-)

diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index df2668064ea2..99454e096588 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -176,6 +176,8 @@ struct axi_dmac {
 
 	struct dma_device dma_dev;
 	struct axi_dmac_chan chan;
+
+	bool unbound;
 };
 
 static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
@@ -184,6 +186,11 @@ static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
 		dma_dev);
 }
 
+static struct axi_dmac *dev_to_axi_dmac(struct dma_device *dev)
+{
+	return container_of(dev, struct axi_dmac, dma_dev);
+}
+
 static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
 {
 	return container_of(c, struct axi_dmac_chan, vchan.chan);
@@ -616,6 +623,11 @@ static int axi_dmac_terminate_all(struct dma_chan *c)
 	LIST_HEAD(head);
 
 	spin_lock_irqsave(&chan->vchan.lock, flags);
+	if (dmac->unbound) {
+		/* We're gone */
+		spin_unlock_irqrestore(&chan->vchan.lock, flags);
+		return -ENODEV;
+	}
 	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
 	chan->next_desc = NULL;
 	vchan_get_all_descriptors(&chan->vchan, &head);
@@ -644,9 +656,12 @@ static void axi_dmac_issue_pending(struct dma_chan *c)
 	if (chan->hw_sg)
 		ctrl |= AXI_DMAC_CTRL_ENABLE_SG;
 
-	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, ctrl);
-
 	spin_lock_irqsave(&chan->vchan.lock, flags);
+	if (dmac->unbound) {
+		spin_unlock_irqrestore(&chan->vchan.lock, flags);
+		return;
+	}
+	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, ctrl);
 	if (vchan_issue_pending(&chan->vchan))
 		axi_dmac_start_transfer(chan);
 	spin_unlock_irqrestore(&chan->vchan.lock, flags);
@@ -1206,6 +1221,14 @@ static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
 	return 0;
 }
 
+static void axi_dmac_release(struct dma_device *dma_dev)
+{
+	struct axi_dmac *dmac = dev_to_axi_dmac(dma_dev);
+
+	put_device(dma_dev->dev);
+	kfree(dmac);
+}
+
 static void axi_dmac_tasklet_kill(void *task)
 {
 	tasklet_kill(task);
@@ -1216,6 +1239,16 @@ static void axi_dmac_free_dma_controller(void *of_node)
 	of_dma_controller_free(of_node);
 }
 
+static void axi_dmac_disable(void *__dmac)
+{
+	struct axi_dmac *dmac = __dmac;
+
+	spin_lock(&dmac->chan.vchan.lock);
+	dmac->unbound = true;
+	spin_unlock(&dmac->chan.vchan.lock);
+	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
+}
+
 static int axi_dmac_probe(struct platform_device *pdev)
 {
 	struct dma_device *dma_dev;
@@ -1225,7 +1258,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
 	u32 irq_mask = 0;
 	int ret;
 
-	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
+	dmac = kzalloc_obj(struct axi_dmac);
 	if (!dmac)
 		return -ENOMEM;
 
@@ -1270,9 +1303,10 @@ static int axi_dmac_probe(struct platform_device *pdev)
 	dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
 	dma_dev->device_terminate_all = axi_dmac_terminate_all;
 	dma_dev->device_synchronize = axi_dmac_synchronize;
-	dma_dev->dev = &pdev->dev;
+	dma_dev->dev = get_device(&pdev->dev);
 	dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
 	dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
+	dma_dev->device_release = axi_dmac_release;
 	dma_dev->directions = BIT(dmac->chan.direction);
 	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
 	dma_dev->max_sg_burst = 31; /* 31 SGs maximum in one burst */
@@ -1326,6 +1360,11 @@ static int axi_dmac_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
+	/* So that we can mark the device as unbound and disable it */
+	ret = devm_add_action_or_reset(&pdev->dev, axi_dmac_disable, dmac);
+	if (ret)
+		return ret;
+
 	ret = devm_request_irq(&pdev->dev, dmac->irq, axi_dmac_interrupt_handler,
 			       IRQF_SHARED, dev_name(&pdev->dev), dmac);
 	if (ret)

-- 
2.53.0



^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [PATCH 1/2] dmaengine: dma-axi-dmac: Defer freeing DMA descriptors
  2026-03-26 13:37 ` [PATCH 1/2] dmaengine: dma-axi-dmac: Defer freeing DMA descriptors Nuno Sá via B4 Relay
@ 2026-03-26 15:14   ` Nuno Sá
  0 siblings, 0 replies; 4+ messages in thread
From: Nuno Sá @ 2026-03-26 15:14 UTC (permalink / raw)
  To: Nuno Sá
  Cc: dmaengine, linux-kernel, Lars-Peter Clausen, Vinod Koul, Frank Li,
	Eliza Balas

On Thu, Mar 26, 2026 at 01:37:35PM +0000, Nuno Sá wrote:
> From: Eliza Balas <eliza.balas@analog.com>
> 
> This IP core can be used in architectures (like Microblaze) where DMA
> descriptors are allocated with vmalloc(). Hence, given that freeing the
> descriptors happen in softirq context, vunmpap() will BUG().
> 
> To solve the above, we setup a work item during allocation of the
> descriptors and schedule in softirq context. Hence, the actual freeing
> happens in threaded context.
> 
> Signed-off-by: Eliza Balas <eliza.balas@analog.com>
> Signed-off-by: Nuno Sá <nuno.sa@analog.com>
> ---
>  drivers/dma/dma-axi-dmac.c | 48 +++++++++++++++++++++++++++++++++-------------
>  1 file changed, 35 insertions(+), 13 deletions(-)
> 
> diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
> index 45c2c8e4bc45..df2668064ea2 100644
> --- a/drivers/dma/dma-axi-dmac.c
> +++ b/drivers/dma/dma-axi-dmac.c
> @@ -133,6 +133,8 @@ struct axi_dmac_desc {
>  	struct virt_dma_desc vdesc;
>  	struct axi_dmac_chan *chan;
>  
> +	struct work_struct sched_work;

Ahh, just realized that workqueue.h needs to be included. Will wait for
some feedback before v2.

- Nuno Sá

> +
>  	bool cyclic;
>  	bool cyclic_eot;
>  	bool have_partial_xfer;
> @@ -650,6 +652,26 @@ static void axi_dmac_issue_pending(struct dma_chan *c)
>  	spin_unlock_irqrestore(&chan->vchan.lock, flags);
>  }
>  
> +static void axi_dmac_free_desc(struct axi_dmac_desc *desc)
> +{
> +	struct axi_dmac *dmac = chan_to_axi_dmac(desc->chan);
> +	struct device *dev = dmac->dma_dev.dev;
> +	struct axi_dmac_hw_desc *hw = desc->sg[0].hw;
> +	dma_addr_t hw_phys = desc->sg[0].hw_phys;
> +
> +	dma_free_coherent(dev, PAGE_ALIGN(desc->num_sgs * sizeof(*hw)),
> +			  hw, hw_phys);
> +	kfree(desc);
> +}
> +
> +static void axi_dmac_free_desc_schedule_work(struct work_struct *work)
> +{
> +	struct axi_dmac_desc *desc = container_of(work, struct axi_dmac_desc,
> +						  sched_work);
> +
> +	axi_dmac_free_desc(desc);
> +}
> +
>  static struct axi_dmac_desc *
>  axi_dmac_alloc_desc(struct axi_dmac_chan *chan, unsigned int num_sgs)
>  {
> @@ -687,21 +709,18 @@ axi_dmac_alloc_desc(struct axi_dmac_chan *chan, unsigned int num_sgs)
>  	/* The last hardware descriptor will trigger an interrupt */
>  	desc->sg[num_sgs - 1].hw->flags = AXI_DMAC_HW_FLAG_LAST | AXI_DMAC_HW_FLAG_IRQ;
>  
> +	/*
> +	 * We need to setup a work item because this IP can be used on archs
> +	 * that rely on vmalloced memory for descriptors. And given that freeing
> +	 * the descriptors happens in softirq context, vunmpap() will BUG().
> +	 * Hence, setup the worker so that we can queue it and free the
> +	 * descriptor in threaded context.
> +	 */
> +	INIT_WORK(&desc->sched_work, axi_dmac_free_desc_schedule_work);
> +
>  	return desc;
>  }
>  
> -static void axi_dmac_free_desc(struct axi_dmac_desc *desc)
> -{
> -	struct axi_dmac *dmac = chan_to_axi_dmac(desc->chan);
> -	struct device *dev = dmac->dma_dev.dev;
> -	struct axi_dmac_hw_desc *hw = desc->sg[0].hw;
> -	dma_addr_t hw_phys = desc->sg[0].hw_phys;
> -
> -	dma_free_coherent(dev, PAGE_ALIGN(desc->num_sgs * sizeof(*hw)),
> -			  hw, hw_phys);
> -	kfree(desc);
> -}
> -
>  static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
>  	enum dma_transfer_direction direction, dma_addr_t addr,
>  	unsigned int num_periods, unsigned int period_len,
> @@ -942,7 +961,10 @@ static void axi_dmac_free_chan_resources(struct dma_chan *c)
>  
>  static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
>  {
> -	axi_dmac_free_desc(to_axi_dmac_desc(vdesc));
> +	struct axi_dmac_desc *desc = to_axi_dmac_desc(vdesc);
> +
> +	/* See the comment in axi_dmac_alloc_desc() for the why! */
> +	schedule_work(&desc->sched_work);
>  }
>  
>  static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
> 
> -- 
> 2.53.0
> 

^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2026-03-26 15:14 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-03-26 13:37 [PATCH 0/2] dmaengine: dma-axi-dmac: Some memory related fixes Nuno Sá via B4 Relay
2026-03-26 13:37 ` [PATCH 1/2] dmaengine: dma-axi-dmac: Defer freeing DMA descriptors Nuno Sá via B4 Relay
2026-03-26 15:14   ` Nuno Sá
2026-03-26 13:37 ` [PATCH 2/2] dmaengine: dma-axi-dmac: fix use-after-free on unbind Nuno Sá via B4 Relay

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox