Linux PCI subsystem development
 help / color / mirror / Atom feed
From: Jonathan Cameron <Jonathan.Cameron@huawei.com>
To: Erick Karanja <karanja99erick@gmail.com>
Cc: <manivannan.sadhasivam@linaro.org>, <kw@linux.com>,
	<kishon@kernel.org>, <bhelgaas@google.com>,
	<linux-pci@vger.kernel.org>, <linux-kernel@vger.kernel.org>,
	<julia.lawall@inria.fr>
Subject: Re: [PATCH 1/2] PCI: endpoint: Replace manual mutex handling with scoped_guard()
Date: Fri, 2 May 2025 09:41:55 +0100	[thread overview]
Message-ID: <20250502094155.00003f74@huawei.com> (raw)
In-Reply-To: <49b27386eb57432d204153794bfd20f78aa72253.1746114596.git.karanja99erick@gmail.com>

On Thu,  1 May 2025 18:56:11 +0300
Erick Karanja <karanja99erick@gmail.com> wrote:

In the patch name always mention which driver it is.


> This refactor replaces manual mutex lock/unlock with scoped_guard()
> in places where early exits use goto. Using scoped_guard()
> avoids error-prone unlock paths and simplifies control flow.

In this case that only works because you've replicated the other
unwinding code in various error paths.  That's more error-prone,
so I'm not convinced this particular case is a good use of scoped cleanup.

> 
> Signed-off-by: Erick Karanja <karanja99erick@gmail.com>
> ---
>  drivers/pci/endpoint/functions/pci-epf-mhi.c | 358 +++++++++----------
>  1 file changed, 166 insertions(+), 192 deletions(-)
> 
> diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
> index 6643a88c7a0c..57ef522c3d07 100644
> --- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
> +++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
> @@ -323,57 +323,52 @@ static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl,
>  	if (buf_info->size < SZ_4K)
>  		return pci_epf_mhi_iatu_read(mhi_cntrl, buf_info);
>  
> -	mutex_lock(&epf_mhi->lock);
> -
> -	config.direction = DMA_DEV_TO_MEM;
> -	config.src_addr = buf_info->host_addr;
> +	scoped_guard(mutex, &epf_mhi->lock) {

I'd be tempted to use
guard(mutex)(&epf_mhi->lock); given your scope goes to the end
of the function.

That will reduce the diff and make it easier to spot any functional changes...

> +		config.direction = DMA_DEV_TO_MEM;
> +		config.src_addr = buf_info->host_addr;
>  
> -	ret = dmaengine_slave_config(chan, &config);
> -	if (ret) {
> -		dev_err(dev, "Failed to configure DMA channel\n");
> -		goto err_unlock;
> -	}
> +		ret = dmaengine_slave_config(chan, &config);
> +		if (ret) {
> +			dev_err(dev, "Failed to configure DMA channel\n");
> +			return ret;
> +		}
>  
> -	dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
> -				  DMA_FROM_DEVICE);
> -	ret = dma_mapping_error(dma_dev, dst_addr);
> -	if (ret) {
> -		dev_err(dev, "Failed to map remote memory\n");
> -		goto err_unlock;
> -	}
> +		dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
> +					  DMA_FROM_DEVICE);
> +		ret = dma_mapping_error(dma_dev, dst_addr);
> +		if (ret) {
> +			dev_err(dev, "Failed to map remote memory\n");
> +			return ret;
> +		}
>  
> -	desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
> -					   DMA_DEV_TO_MEM,
> -					   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
> -	if (!desc) {
> -		dev_err(dev, "Failed to prepare DMA\n");
> -		ret = -EIO;
> -		goto err_unmap;
> -	}
> +		desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
> +						   DMA_DEV_TO_MEM,
> +						   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
> +		if (!desc) {
> +			dev_err(dev, "Failed to prepare DMA\n");
> +			dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);

In cases like this, where there is other cleanup to do, the advantages of
scoped cleanup are greatly reduced.  I'm not sure it is a good idea here.



> +			return -EIO;
> +		}
>  
> -	desc->callback = pci_epf_mhi_dma_callback;
> -	desc->callback_param = &complete;
> +		desc->callback = pci_epf_mhi_dma_callback;
> +		desc->callback_param = &complete;
>  
> -	cookie = dmaengine_submit(desc);
> -	ret = dma_submit_error(cookie);
> -	if (ret) {
> -		dev_err(dev, "Failed to do DMA submit\n");
> -		goto err_unmap;
> -	}
> +		cookie = dmaengine_submit(desc);
> +		ret = dma_submit_error(cookie);
> +		if (ret) {
> +			dev_err(dev, "Failed to do DMA submit\n");
> +			dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
> +			return ret;
> +		}
>  
> -	dma_async_issue_pending(chan);
> -	ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
> -	if (!ret) {
> -		dev_err(dev, "DMA transfer timeout\n");
> -		dmaengine_terminate_sync(chan);
> -		ret = -ETIMEDOUT;

This path previously hit the err_unmap condition and now it doesn't.
Why that functional change?

> +		dma_async_issue_pending(chan);
> +		ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
> +		if (!ret) {
> +			dev_err(dev, "DMA transfer timeout\n");
> +			dmaengine_terminate_sync(chan);
> +			ret = -ETIMEDOUT;

return -ETIMEDOUT;

> +		}
>  	}
> -
Even in the good path we were previously unmapping and now we aren't.
> -err_unmap:
> -	dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
> -err_unlock:
> -	mutex_unlock(&epf_mhi->lock);
> -
>  	return ret;
return 0; //assuming no way to get here with ret set.

>  }
>  
> @@ -394,57 +389,52 @@ static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl,
>  	if (buf_info->size < SZ_4K)
>  		return pci_epf_mhi_iatu_write(mhi_cntrl, buf_info);
>  
> -	mutex_lock(&epf_mhi->lock);
> -
> -	config.direction = DMA_MEM_TO_DEV;
> -	config.dst_addr = buf_info->host_addr;
> +	scoped_guard(mutex, &epf_mhi->lock) {
> +		config.direction = DMA_MEM_TO_DEV;
> +		config.dst_addr = buf_info->host_addr;
>  
> -	ret = dmaengine_slave_config(chan, &config);
> -	if (ret) {
> -		dev_err(dev, "Failed to configure DMA channel\n");
> -		goto err_unlock;
> -	}
> +		ret = dmaengine_slave_config(chan, &config);
> +		if (ret) {
> +			dev_err(dev, "Failed to configure DMA channel\n");
> +			return ret;
> +		}
>  
> -	src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
> -				  DMA_TO_DEVICE);
> -	ret = dma_mapping_error(dma_dev, src_addr);
> -	if (ret) {
> -		dev_err(dev, "Failed to map remote memory\n");
> -		goto err_unlock;
> -	}
> +		src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
> +					  DMA_TO_DEVICE);
> +		ret = dma_mapping_error(dma_dev, src_addr);
> +		if (ret) {
> +			dev_err(dev, "Failed to map remote memory\n");
> +			return ret;
> +		}
>  
> -	desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
> -					   DMA_MEM_TO_DEV,
> -					   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
> -	if (!desc) {
> -		dev_err(dev, "Failed to prepare DMA\n");
> -		ret = -EIO;
> -		goto err_unmap;
> -	}
> +		desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
> +						   DMA_MEM_TO_DEV,
> +						   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
> +		if (!desc) {
> +			dev_err(dev, "Failed to prepare DMA\n");
> +			dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
> +			return -EIO;
> +		}
>  
> -	desc->callback = pci_epf_mhi_dma_callback;
> -	desc->callback_param = &complete;
> +		desc->callback = pci_epf_mhi_dma_callback;
> +		desc->callback_param = &complete;
>  
> -	cookie = dmaengine_submit(desc);
> -	ret = dma_submit_error(cookie);
> -	if (ret) {
> -		dev_err(dev, "Failed to do DMA submit\n");
> -		goto err_unmap;
> -	}
> +		cookie = dmaengine_submit(desc);
> +		ret = dma_submit_error(cookie);
> +		if (ret) {
> +			dev_err(dev, "Failed to do DMA submit\n");
> +			dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
> +			return ret;
> +		}
>  
> -	dma_async_issue_pending(chan);
> -	ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
> -	if (!ret) {
> -		dev_err(dev, "DMA transfer timeout\n");
> -		dmaengine_terminate_sync(chan);
> -		ret = -ETIMEDOUT;
> +		dma_async_issue_pending(chan);
> +		ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
> +		if (!ret) {
> +			dev_err(dev, "DMA transfer timeout\n");
> +			dmaengine_terminate_sync(chan);
> +			ret = -ETIMEDOUT;
return -ETIMEDOUT;
Same issue with the logic change meaning we no longer unmap.

> +		}
>  	}
> -
> -err_unmap:
> -	dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
> -err_unlock:
> -	mutex_unlock(&epf_mhi->lock);
> -
>  	return ret;

return 0; // assuming there is no way to get here with an error set

>  }
>  
> @@ -497,67 +487,59 @@ static int pci_epf_mhi_edma_read_async(struct mhi_ep_cntrl *mhi_cntrl,
>  	dma_addr_t dst_addr;
>  	int ret;
>  
> -	mutex_lock(&epf_mhi->lock);
> +	scoped_guard(mutex, &epf_mhi->lock) {
> +		config.direction = DMA_DEV_TO_MEM;
> +		config.src_addr = buf_info->host_addr;
>  
> -	config.direction = DMA_DEV_TO_MEM;
> -	config.src_addr = buf_info->host_addr;
> +		ret = dmaengine_slave_config(chan, &config);
> +		if (ret) {
> +			dev_err(dev, "Failed to configure DMA channel\n");
> +			return ret;
> +		}
>  
> -	ret = dmaengine_slave_config(chan, &config);
> -	if (ret) {
> -		dev_err(dev, "Failed to configure DMA channel\n");
> -		goto err_unlock;
> -	}
> +		dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
> +					  DMA_FROM_DEVICE);
> +		ret = dma_mapping_error(dma_dev, dst_addr);
> +		if (ret) {
> +			dev_err(dev, "Failed to map remote memory\n");
> +			return ret;
> +		}
>  
> -	dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
> -				  DMA_FROM_DEVICE);
> -	ret = dma_mapping_error(dma_dev, dst_addr);
> -	if (ret) {
> -		dev_err(dev, "Failed to map remote memory\n");
> -		goto err_unlock;
> -	}
> +		desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
> +						   DMA_DEV_TO_MEM,
> +						   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
> +		if (!desc) {
> +			dev_err(dev, "Failed to prepare DMA\n");
> +			dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
> +			return -EIO;
> +		}
>  
> -	desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
> -					   DMA_DEV_TO_MEM,
> -					   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
> -	if (!desc) {
> -		dev_err(dev, "Failed to prepare DMA\n");
> -		ret = -EIO;
> -		goto err_unmap;
> -	}
> +		transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
> +		if (!transfer) {
> +			dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
> +			return -ENOMEM;
> +		}
>  
> -	transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
> -	if (!transfer) {
> -		ret = -ENOMEM;
> -		goto err_unmap;
> -	}
> +		transfer->epf_mhi = epf_mhi;
> +		transfer->paddr = dst_addr;
> +		transfer->size = buf_info->size;
> +		transfer->dir = DMA_FROM_DEVICE;
> +		memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));
>  
> -	transfer->epf_mhi = epf_mhi;
> -	transfer->paddr = dst_addr;
> -	transfer->size = buf_info->size;
> -	transfer->dir = DMA_FROM_DEVICE;
> -	memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));
> +		desc->callback = pci_epf_mhi_dma_async_callback;
> +		desc->callback_param = transfer;
>  
> -	desc->callback = pci_epf_mhi_dma_async_callback;
> -	desc->callback_param = transfer;
> +		cookie = dmaengine_submit(desc);
> +		ret = dma_submit_error(cookie);
> +		if (ret) {
> +			dev_err(dev, "Failed to do DMA submit\n");
> +			kfree(transfer);
> +			dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
> +			return ret;
> +		}
>  
> -	cookie = dmaengine_submit(desc);
> -	ret = dma_submit_error(cookie);
> -	if (ret) {
> -		dev_err(dev, "Failed to do DMA submit\n");
> -		goto err_free_transfer;
> +		dma_async_issue_pending(chan);
>  	}
> -
> -	dma_async_issue_pending(chan);
> -
> -	goto err_unlock;
> -
> -err_free_transfer:
> -	kfree(transfer);
> -err_unmap:
> -	dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
> -err_unlock:
> -	mutex_unlock(&epf_mhi->lock);
> -
>  	return ret;
>  }
>  
> @@ -576,67 +558,59 @@ static int pci_epf_mhi_edma_write_async(struct mhi_ep_cntrl *mhi_cntrl,
>  	dma_addr_t src_addr;
>  	int ret;
>  
> -	mutex_lock(&epf_mhi->lock);
> +	scoped_guard(mutex, &epf_mhi->lock) {
> +		config.direction = DMA_MEM_TO_DEV;
> +		config.dst_addr = buf_info->host_addr;
>  
> -	config.direction = DMA_MEM_TO_DEV;
> -	config.dst_addr = buf_info->host_addr;
> +		ret = dmaengine_slave_config(chan, &config);
> +		if (ret) {
> +			dev_err(dev, "Failed to configure DMA channel\n");
> +			return ret;
> +		}
>  
> -	ret = dmaengine_slave_config(chan, &config);
> -	if (ret) {
> -		dev_err(dev, "Failed to configure DMA channel\n");
> -		goto err_unlock;
> -	}
> +		src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
> +					  DMA_TO_DEVICE);
> +		ret = dma_mapping_error(dma_dev, src_addr);
> +		if (ret) {
> +			dev_err(dev, "Failed to map remote memory\n");
> +			return ret;
> +		}
>  
> -	src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
> -				  DMA_TO_DEVICE);
> -	ret = dma_mapping_error(dma_dev, src_addr);
> -	if (ret) {
> -		dev_err(dev, "Failed to map remote memory\n");
> -		goto err_unlock;
> -	}
> +		desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
> +						   DMA_MEM_TO_DEV,
> +						   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
> +		if (!desc) {
> +			dev_err(dev, "Failed to prepare DMA\n");
> +			dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
> +			return -EIO;
> +		}
>  
> -	desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
> -					   DMA_MEM_TO_DEV,
> -					   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
> -	if (!desc) {
> -		dev_err(dev, "Failed to prepare DMA\n");
> -		ret = -EIO;
> -		goto err_unmap;
> -	}
> +		transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
> +		if (!transfer) {
> +			dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
> +			return -ENOMEM;
> +		}
>  
> -	transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
> -	if (!transfer) {
> -		ret = -ENOMEM;
> -		goto err_unmap;
> -	}
> +		transfer->epf_mhi = epf_mhi;
> +		transfer->paddr = src_addr;
> +		transfer->size = buf_info->size;
> +		transfer->dir = DMA_TO_DEVICE;
> +		memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));
>  
> -	transfer->epf_mhi = epf_mhi;
> -	transfer->paddr = src_addr;
> -	transfer->size = buf_info->size;
> -	transfer->dir = DMA_TO_DEVICE;
> -	memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));
> +		desc->callback = pci_epf_mhi_dma_async_callback;
> +		desc->callback_param = transfer;
>  
> -	desc->callback = pci_epf_mhi_dma_async_callback;
> -	desc->callback_param = transfer;
> +		cookie = dmaengine_submit(desc);
> +		ret = dma_submit_error(cookie);
> +		if (ret) {
> +			dev_err(dev, "Failed to do DMA submit\n");
> +			kfree(transfer);
> +			dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
> +			return ret;
> +		}
>  
> -	cookie = dmaengine_submit(desc);
> -	ret = dma_submit_error(cookie);
> -	if (ret) {
> -		dev_err(dev, "Failed to do DMA submit\n");
> -		goto err_free_transfer;
> +		dma_async_issue_pending(chan);
>  	}
> -
> -	dma_async_issue_pending(chan);
> -
> -	goto err_unlock;
> -
> -err_free_transfer:
> -	kfree(transfer);
> -err_unmap:
> -	dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
> -err_unlock:
> -	mutex_unlock(&epf_mhi->lock);
> -
>  	return ret;
>  }
>  


  reply	other threads:[~2025-05-02  8:42 UTC|newest]

Thread overview: 5+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-05-01 15:56 [PATCH 0/2] Use scoped_guard to safely manage mutex locking Erick Karanja
2025-05-01 15:56 ` [PATCH 1/2] PCI: endpoint: Replace manual mutex handling with scoped_guard() Erick Karanja
2025-05-02  8:41   ` Jonathan Cameron [this message]
2025-05-01 15:56 ` [PATCH 2/2] PCI: endpoint: Use scoped_guard for manual mutex lock/unlock Erick Karanja
2025-05-02  8:43   ` Jonathan Cameron

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20250502094155.00003f74@huawei.com \
    --to=jonathan.cameron@huawei.com \
    --cc=bhelgaas@google.com \
    --cc=julia.lawall@inria.fr \
    --cc=karanja99erick@gmail.com \
    --cc=kishon@kernel.org \
    --cc=kw@linux.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-pci@vger.kernel.org \
    --cc=manivannan.sadhasivam@linaro.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox