* [PATCH 0/2] Use scoped_guard to safely manage mutex locking
@ 2025-05-01 15:56 Erick Karanja
2025-05-01 15:56 ` [PATCH 1/2] PCI: endpoint: Replace manual mutex handling with scoped_guard() Erick Karanja
2025-05-01 15:56 ` [PATCH 2/2] PCI: endpoint: Use scoped_guard for manual mutex lock/unlock Erick Karanja
0 siblings, 2 replies; 5+ messages in thread
From: Erick Karanja @ 2025-05-01 15:56 UTC (permalink / raw)
To: manivannan.sadhasivam, kw
Cc: kishon, bhelgaas, linux-pci, linux-kernel, julia.lawall,
Erick Karanja
This change replaces manual mutex locking/unlocking with scoped_guard
to ensure mutexes are always released safely, even in the presence of
early returns or errors. It simplifies control flow and eliminates the
need for explicit goto-based cleanup blocks.
Erick Karanja (2):
PCI: endpoint: Replace manual mutex handling with scoped_guard()
PCI: endpoint: Use scoped_guard for manual mutex lock/unlock
drivers/pci/endpoint/functions/pci-epf-mhi.c | 358 +++++++++----------
drivers/pci/endpoint/pci-epc-core.c | 53 ++-
2 files changed, 190 insertions(+), 221 deletions(-)
--
2.43.0
^ permalink raw reply [flat|nested] 5+ messages in thread
* [PATCH 1/2] PCI: endpoint: Replace manual mutex handling with scoped_guard()
2025-05-01 15:56 [PATCH 0/2] Use scoped_guard to safely manage mutex locking Erick Karanja
@ 2025-05-01 15:56 ` Erick Karanja
2025-05-02 8:41 ` Jonathan Cameron
2025-05-01 15:56 ` [PATCH 2/2] PCI: endpoint: Use scoped_guard for manual mutex lock/unlock Erick Karanja
1 sibling, 1 reply; 5+ messages in thread
From: Erick Karanja @ 2025-05-01 15:56 UTC (permalink / raw)
To: manivannan.sadhasivam, kw
Cc: kishon, bhelgaas, linux-pci, linux-kernel, julia.lawall,
Erick Karanja
This refactor replaces manual mutex lock/unlock with scoped_guard()
in places where early exits use goto. Using scoped_guard()
avoids error-prone unlock paths and simplifies control flow.
Signed-off-by: Erick Karanja <karanja99erick@gmail.com>
---
drivers/pci/endpoint/functions/pci-epf-mhi.c | 358 +++++++++----------
1 file changed, 166 insertions(+), 192 deletions(-)
diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
index 6643a88c7a0c..57ef522c3d07 100644
--- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
+++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
@@ -323,57 +323,52 @@ static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl,
if (buf_info->size < SZ_4K)
return pci_epf_mhi_iatu_read(mhi_cntrl, buf_info);
- mutex_lock(&epf_mhi->lock);
-
- config.direction = DMA_DEV_TO_MEM;
- config.src_addr = buf_info->host_addr;
+ scoped_guard(mutex, &epf_mhi->lock) {
+ config.direction = DMA_DEV_TO_MEM;
+ config.src_addr = buf_info->host_addr;
- ret = dmaengine_slave_config(chan, &config);
- if (ret) {
- dev_err(dev, "Failed to configure DMA channel\n");
- goto err_unlock;
- }
+ ret = dmaengine_slave_config(chan, &config);
+ if (ret) {
+ dev_err(dev, "Failed to configure DMA channel\n");
+ return ret;
+ }
- dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
- DMA_FROM_DEVICE);
- ret = dma_mapping_error(dma_dev, dst_addr);
- if (ret) {
- dev_err(dev, "Failed to map remote memory\n");
- goto err_unlock;
- }
+ dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
+ DMA_FROM_DEVICE);
+ ret = dma_mapping_error(dma_dev, dst_addr);
+ if (ret) {
+ dev_err(dev, "Failed to map remote memory\n");
+ return ret;
+ }
- desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
- DMA_DEV_TO_MEM,
- DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
- if (!desc) {
- dev_err(dev, "Failed to prepare DMA\n");
- ret = -EIO;
- goto err_unmap;
- }
+ desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
+ DMA_DEV_TO_MEM,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "Failed to prepare DMA\n");
+ dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
+ return -EIO;
+ }
- desc->callback = pci_epf_mhi_dma_callback;
- desc->callback_param = &complete;
+ desc->callback = pci_epf_mhi_dma_callback;
+ desc->callback_param = &complete;
- cookie = dmaengine_submit(desc);
- ret = dma_submit_error(cookie);
- if (ret) {
- dev_err(dev, "Failed to do DMA submit\n");
- goto err_unmap;
- }
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(dev, "Failed to do DMA submit\n");
+ dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
+ return ret;
+ }
- dma_async_issue_pending(chan);
- ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
- if (!ret) {
- dev_err(dev, "DMA transfer timeout\n");
- dmaengine_terminate_sync(chan);
- ret = -ETIMEDOUT;
+ dma_async_issue_pending(chan);
+ ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
+ if (!ret) {
+ dev_err(dev, "DMA transfer timeout\n");
+ dmaengine_terminate_sync(chan);
+ ret = -ETIMEDOUT;
+ }
}
-
-err_unmap:
- dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
-err_unlock:
- mutex_unlock(&epf_mhi->lock);
-
return ret;
}
@@ -394,57 +389,52 @@ static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl,
if (buf_info->size < SZ_4K)
return pci_epf_mhi_iatu_write(mhi_cntrl, buf_info);
- mutex_lock(&epf_mhi->lock);
-
- config.direction = DMA_MEM_TO_DEV;
- config.dst_addr = buf_info->host_addr;
+ scoped_guard(mutex, &epf_mhi->lock) {
+ config.direction = DMA_MEM_TO_DEV;
+ config.dst_addr = buf_info->host_addr;
- ret = dmaengine_slave_config(chan, &config);
- if (ret) {
- dev_err(dev, "Failed to configure DMA channel\n");
- goto err_unlock;
- }
+ ret = dmaengine_slave_config(chan, &config);
+ if (ret) {
+ dev_err(dev, "Failed to configure DMA channel\n");
+ return ret;
+ }
- src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
- DMA_TO_DEVICE);
- ret = dma_mapping_error(dma_dev, src_addr);
- if (ret) {
- dev_err(dev, "Failed to map remote memory\n");
- goto err_unlock;
- }
+ src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(dma_dev, src_addr);
+ if (ret) {
+ dev_err(dev, "Failed to map remote memory\n");
+ return ret;
+ }
- desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
- DMA_MEM_TO_DEV,
- DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
- if (!desc) {
- dev_err(dev, "Failed to prepare DMA\n");
- ret = -EIO;
- goto err_unmap;
- }
+ desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
+ DMA_MEM_TO_DEV,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "Failed to prepare DMA\n");
+ dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
+ return -EIO;
+ }
- desc->callback = pci_epf_mhi_dma_callback;
- desc->callback_param = &complete;
+ desc->callback = pci_epf_mhi_dma_callback;
+ desc->callback_param = &complete;
- cookie = dmaengine_submit(desc);
- ret = dma_submit_error(cookie);
- if (ret) {
- dev_err(dev, "Failed to do DMA submit\n");
- goto err_unmap;
- }
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(dev, "Failed to do DMA submit\n");
+ dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
+ return ret;
+ }
- dma_async_issue_pending(chan);
- ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
- if (!ret) {
- dev_err(dev, "DMA transfer timeout\n");
- dmaengine_terminate_sync(chan);
- ret = -ETIMEDOUT;
+ dma_async_issue_pending(chan);
+ ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
+ if (!ret) {
+ dev_err(dev, "DMA transfer timeout\n");
+ dmaengine_terminate_sync(chan);
+ ret = -ETIMEDOUT;
+ }
}
-
-err_unmap:
- dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
-err_unlock:
- mutex_unlock(&epf_mhi->lock);
-
return ret;
}
@@ -497,67 +487,59 @@ static int pci_epf_mhi_edma_read_async(struct mhi_ep_cntrl *mhi_cntrl,
dma_addr_t dst_addr;
int ret;
- mutex_lock(&epf_mhi->lock);
+ scoped_guard(mutex, &epf_mhi->lock) {
+ config.direction = DMA_DEV_TO_MEM;
+ config.src_addr = buf_info->host_addr;
- config.direction = DMA_DEV_TO_MEM;
- config.src_addr = buf_info->host_addr;
+ ret = dmaengine_slave_config(chan, &config);
+ if (ret) {
+ dev_err(dev, "Failed to configure DMA channel\n");
+ return ret;
+ }
- ret = dmaengine_slave_config(chan, &config);
- if (ret) {
- dev_err(dev, "Failed to configure DMA channel\n");
- goto err_unlock;
- }
+ dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
+ DMA_FROM_DEVICE);
+ ret = dma_mapping_error(dma_dev, dst_addr);
+ if (ret) {
+ dev_err(dev, "Failed to map remote memory\n");
+ return ret;
+ }
- dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
- DMA_FROM_DEVICE);
- ret = dma_mapping_error(dma_dev, dst_addr);
- if (ret) {
- dev_err(dev, "Failed to map remote memory\n");
- goto err_unlock;
- }
+ desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
+ DMA_DEV_TO_MEM,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "Failed to prepare DMA\n");
+ dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
+ return -EIO;
+ }
- desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
- DMA_DEV_TO_MEM,
- DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
- if (!desc) {
- dev_err(dev, "Failed to prepare DMA\n");
- ret = -EIO;
- goto err_unmap;
- }
+ transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
+ if (!transfer) {
+ dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
+ return -ENOMEM;
+ }
- transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
- if (!transfer) {
- ret = -ENOMEM;
- goto err_unmap;
- }
+ transfer->epf_mhi = epf_mhi;
+ transfer->paddr = dst_addr;
+ transfer->size = buf_info->size;
+ transfer->dir = DMA_FROM_DEVICE;
+ memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));
- transfer->epf_mhi = epf_mhi;
- transfer->paddr = dst_addr;
- transfer->size = buf_info->size;
- transfer->dir = DMA_FROM_DEVICE;
- memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));
+ desc->callback = pci_epf_mhi_dma_async_callback;
+ desc->callback_param = transfer;
- desc->callback = pci_epf_mhi_dma_async_callback;
- desc->callback_param = transfer;
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(dev, "Failed to do DMA submit\n");
+ kfree(transfer);
+ dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
+ return ret;
+ }
- cookie = dmaengine_submit(desc);
- ret = dma_submit_error(cookie);
- if (ret) {
- dev_err(dev, "Failed to do DMA submit\n");
- goto err_free_transfer;
+ dma_async_issue_pending(chan);
}
-
- dma_async_issue_pending(chan);
-
- goto err_unlock;
-
-err_free_transfer:
- kfree(transfer);
-err_unmap:
- dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
-err_unlock:
- mutex_unlock(&epf_mhi->lock);
-
return ret;
}
@@ -576,67 +558,59 @@ static int pci_epf_mhi_edma_write_async(struct mhi_ep_cntrl *mhi_cntrl,
dma_addr_t src_addr;
int ret;
- mutex_lock(&epf_mhi->lock);
+ scoped_guard(mutex, &epf_mhi->lock) {
+ config.direction = DMA_MEM_TO_DEV;
+ config.dst_addr = buf_info->host_addr;
- config.direction = DMA_MEM_TO_DEV;
- config.dst_addr = buf_info->host_addr;
+ ret = dmaengine_slave_config(chan, &config);
+ if (ret) {
+ dev_err(dev, "Failed to configure DMA channel\n");
+ return ret;
+ }
- ret = dmaengine_slave_config(chan, &config);
- if (ret) {
- dev_err(dev, "Failed to configure DMA channel\n");
- goto err_unlock;
- }
+ src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(dma_dev, src_addr);
+ if (ret) {
+ dev_err(dev, "Failed to map remote memory\n");
+ return ret;
+ }
- src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
- DMA_TO_DEVICE);
- ret = dma_mapping_error(dma_dev, src_addr);
- if (ret) {
- dev_err(dev, "Failed to map remote memory\n");
- goto err_unlock;
- }
+ desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
+ DMA_MEM_TO_DEV,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "Failed to prepare DMA\n");
+ dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
+ return -EIO;
+ }
- desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
- DMA_MEM_TO_DEV,
- DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
- if (!desc) {
- dev_err(dev, "Failed to prepare DMA\n");
- ret = -EIO;
- goto err_unmap;
- }
+ transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
+ if (!transfer) {
+ dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
- transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
- if (!transfer) {
- ret = -ENOMEM;
- goto err_unmap;
- }
+ transfer->epf_mhi = epf_mhi;
+ transfer->paddr = src_addr;
+ transfer->size = buf_info->size;
+ transfer->dir = DMA_TO_DEVICE;
+ memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));
- transfer->epf_mhi = epf_mhi;
- transfer->paddr = src_addr;
- transfer->size = buf_info->size;
- transfer->dir = DMA_TO_DEVICE;
- memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));
+ desc->callback = pci_epf_mhi_dma_async_callback;
+ desc->callback_param = transfer;
- desc->callback = pci_epf_mhi_dma_async_callback;
- desc->callback_param = transfer;
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(dev, "Failed to do DMA submit\n");
+ kfree(transfer);
+ dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
+ return ret;
+ }
- cookie = dmaengine_submit(desc);
- ret = dma_submit_error(cookie);
- if (ret) {
- dev_err(dev, "Failed to do DMA submit\n");
- goto err_free_transfer;
+ dma_async_issue_pending(chan);
}
-
- dma_async_issue_pending(chan);
-
- goto err_unlock;
-
-err_free_transfer:
- kfree(transfer);
-err_unmap:
- dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
-err_unlock:
- mutex_unlock(&epf_mhi->lock);
-
return ret;
}
--
2.43.0
^ permalink raw reply related [flat|nested] 5+ messages in thread
* [PATCH 2/2] PCI: endpoint: Use scoped_guard for manual mutex lock/unlock
2025-05-01 15:56 [PATCH 0/2] Use scoped_guard to safely manage mutex locking Erick Karanja
2025-05-01 15:56 ` [PATCH 1/2] PCI: endpoint: Replace manual mutex handling with scoped_guard() Erick Karanja
@ 2025-05-01 15:56 ` Erick Karanja
2025-05-02 8:43 ` Jonathan Cameron
1 sibling, 1 reply; 5+ messages in thread
From: Erick Karanja @ 2025-05-01 15:56 UTC (permalink / raw)
To: manivannan.sadhasivam, kw
Cc: kishon, bhelgaas, linux-pci, linux-kernel, julia.lawall,
Erick Karanja
This refactor replaces manual mutex lock/unlock with scoped_guard()
in places where early exits use goto. Using scoped_guard()
avoids error-prone unlock paths and simplifies control flow.
Signed-off-by: Erick Karanja <karanja99erick@gmail.com>
---
drivers/pci/endpoint/pci-epc-core.c | 53 +++++++++++++----------------
1 file changed, 24 insertions(+), 29 deletions(-)
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index beabea00af91..3f3ff36fa8ab 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -709,7 +709,6 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
{
struct list_head *list;
u32 func_no;
- int ret = 0;
if (IS_ERR_OR_NULL(epc) || epf->is_vf)
return -EINVAL;
@@ -720,36 +719,32 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
if (type == SECONDARY_INTERFACE && epf->sec_epc)
return -EBUSY;
- mutex_lock(&epc->list_lock);
- func_no = find_first_zero_bit(&epc->function_num_map,
- BITS_PER_LONG);
- if (func_no >= BITS_PER_LONG) {
- ret = -EINVAL;
- goto ret;
- }
-
- if (func_no > epc->max_functions - 1) {
- dev_err(&epc->dev, "Exceeding max supported Function Number\n");
- ret = -EINVAL;
- goto ret;
+ scoped_guard(mutex, &epc->list_lock) {
+ func_no = find_first_zero_bit(&epc->function_num_map,
+ BITS_PER_LONG);
+ if (func_no >= BITS_PER_LONG)
+ return -EINVAL;
+
+ if (func_no > epc->max_functions - 1) {
+ dev_err(&epc->dev, "Exceeding max supported Function Number\n");
+ return -EINVAL;
+ }
+
+ set_bit(func_no, &epc->function_num_map);
+ if (type == PRIMARY_INTERFACE) {
+ epf->func_no = func_no;
+ epf->epc = epc;
+ list = &epf->list;
+ } else {
+ epf->sec_epc_func_no = func_no;
+ epf->sec_epc = epc;
+ list = &epf->sec_epc_list;
+ }
+
+ list_add_tail(list, &epc->pci_epf);
}
- set_bit(func_no, &epc->function_num_map);
- if (type == PRIMARY_INTERFACE) {
- epf->func_no = func_no;
- epf->epc = epc;
- list = &epf->list;
- } else {
- epf->sec_epc_func_no = func_no;
- epf->sec_epc = epc;
- list = &epf->sec_epc_list;
- }
-
- list_add_tail(list, &epc->pci_epf);
-ret:
- mutex_unlock(&epc->list_lock);
-
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(pci_epc_add_epf);
--
2.43.0
^ permalink raw reply related [flat|nested] 5+ messages in thread
* Re: [PATCH 1/2] PCI: endpoint: Replace manual mutex handling with scoped_guard()
2025-05-01 15:56 ` [PATCH 1/2] PCI: endpoint: Replace manual mutex handling with scoped_guard() Erick Karanja
@ 2025-05-02 8:41 ` Jonathan Cameron
0 siblings, 0 replies; 5+ messages in thread
From: Jonathan Cameron @ 2025-05-02 8:41 UTC (permalink / raw)
To: Erick Karanja
Cc: manivannan.sadhasivam, kw, kishon, bhelgaas, linux-pci,
linux-kernel, julia.lawall
On Thu, 1 May 2025 18:56:11 +0300
Erick Karanja <karanja99erick@gmail.com> wrote:
In the patch name always mention which driver it is.
> This refactor replaces manual mutex lock/unlock with scoped_guard()
> in places where early exits use goto. Using scoped_guard()
> avoids error-prone unlock paths and simplifies control flow.
In this case that only works because you've replicated the other
unwinding code in various error paths. That's more error prone
so I'm not convinced this particular case is a good use of scoped cleanup.
>
> Signed-off-by: Erick Karanja <karanja99erick@gmail.com>
> ---
> drivers/pci/endpoint/functions/pci-epf-mhi.c | 358 +++++++++----------
> 1 file changed, 166 insertions(+), 192 deletions(-)
>
> diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
> index 6643a88c7a0c..57ef522c3d07 100644
> --- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
> +++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
> @@ -323,57 +323,52 @@ static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl,
> if (buf_info->size < SZ_4K)
> return pci_epf_mhi_iatu_read(mhi_cntrl, buf_info);
>
> - mutex_lock(&epf_mhi->lock);
> -
> - config.direction = DMA_DEV_TO_MEM;
> - config.src_addr = buf_info->host_addr;
> + scoped_guard(mutex, &epf_mhi->lock) {
I'd be tempted to use
guard(mutex)(&epf_mhi->lock); given your scope goes to the end
of the function.
That will reduce the diff and make it easier to spot any functional changes...
> + config.direction = DMA_DEV_TO_MEM;
> + config.src_addr = buf_info->host_addr;
>
> - ret = dmaengine_slave_config(chan, &config);
> - if (ret) {
> - dev_err(dev, "Failed to configure DMA channel\n");
> - goto err_unlock;
> - }
> + ret = dmaengine_slave_config(chan, &config);
> + if (ret) {
> + dev_err(dev, "Failed to configure DMA channel\n");
> + return ret;
> + }
>
> - dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
> - DMA_FROM_DEVICE);
> - ret = dma_mapping_error(dma_dev, dst_addr);
> - if (ret) {
> - dev_err(dev, "Failed to map remote memory\n");
> - goto err_unlock;
> - }
> + dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
> + DMA_FROM_DEVICE);
> + ret = dma_mapping_error(dma_dev, dst_addr);
> + if (ret) {
> + dev_err(dev, "Failed to map remote memory\n");
> + return ret;
> + }
>
> - desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
> - DMA_DEV_TO_MEM,
> - DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
> - if (!desc) {
> - dev_err(dev, "Failed to prepare DMA\n");
> - ret = -EIO;
> - goto err_unmap;
> - }
> + desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
> + DMA_DEV_TO_MEM,
> + DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
> + if (!desc) {
> + dev_err(dev, "Failed to prepare DMA\n");
> + dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
In cases like this where is other cleanup to do the advantages of scoped
cleanup are greatly reduced. I'm not sure it is a good idea here.
> + return -EIO;
> + }
>
> - desc->callback = pci_epf_mhi_dma_callback;
> - desc->callback_param = &complete;
> + desc->callback = pci_epf_mhi_dma_callback;
> + desc->callback_param = &complete;
>
> - cookie = dmaengine_submit(desc);
> - ret = dma_submit_error(cookie);
> - if (ret) {
> - dev_err(dev, "Failed to do DMA submit\n");
> - goto err_unmap;
> - }
> + cookie = dmaengine_submit(desc);
> + ret = dma_submit_error(cookie);
> + if (ret) {
> + dev_err(dev, "Failed to do DMA submit\n");
> + dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
> + return ret;
> + }
>
> - dma_async_issue_pending(chan);
> - ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
> - if (!ret) {
> - dev_err(dev, "DMA transfer timeout\n");
> - dmaengine_terminate_sync(chan);
> - ret = -ETIMEDOUT;
This path previously hit the err_unmap condition and now it doesn't.
Why that functional change?
> + dma_async_issue_pending(chan);
> + ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
> + if (!ret) {
> + dev_err(dev, "DMA transfer timeout\n");
> + dmaengine_terminate_sync(chan);
> + ret = -ETIMEDOUT;
return -ETIMEDOUT;
> + }
> }
> -
Even in the good path we were previously unmapping and now we aren't.
> -err_unmap:
> - dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
> -err_unlock:
> - mutex_unlock(&epf_mhi->lock);
> -
> return ret;
return 0; //assuming no way to get here with ret set.
> }
>
> @@ -394,57 +389,52 @@ static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl,
> if (buf_info->size < SZ_4K)
> return pci_epf_mhi_iatu_write(mhi_cntrl, buf_info);
>
> - mutex_lock(&epf_mhi->lock);
> -
> - config.direction = DMA_MEM_TO_DEV;
> - config.dst_addr = buf_info->host_addr;
> + scoped_guard(mutex, &epf_mhi->lock) {
> + config.direction = DMA_MEM_TO_DEV;
> + config.dst_addr = buf_info->host_addr;
>
> - ret = dmaengine_slave_config(chan, &config);
> - if (ret) {
> - dev_err(dev, "Failed to configure DMA channel\n");
> - goto err_unlock;
> - }
> + ret = dmaengine_slave_config(chan, &config);
> + if (ret) {
> + dev_err(dev, "Failed to configure DMA channel\n");
> + return ret;
> + }
>
> - src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
> - DMA_TO_DEVICE);
> - ret = dma_mapping_error(dma_dev, src_addr);
> - if (ret) {
> - dev_err(dev, "Failed to map remote memory\n");
> - goto err_unlock;
> - }
> + src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
> + DMA_TO_DEVICE);
> + ret = dma_mapping_error(dma_dev, src_addr);
> + if (ret) {
> + dev_err(dev, "Failed to map remote memory\n");
> + return ret;
> + }
>
> - desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
> - DMA_MEM_TO_DEV,
> - DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
> - if (!desc) {
> - dev_err(dev, "Failed to prepare DMA\n");
> - ret = -EIO;
> - goto err_unmap;
> - }
> + desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
> + DMA_MEM_TO_DEV,
> + DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
> + if (!desc) {
> + dev_err(dev, "Failed to prepare DMA\n");
> + dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
> + return -EIO;
> + }
>
> - desc->callback = pci_epf_mhi_dma_callback;
> - desc->callback_param = &complete;
> + desc->callback = pci_epf_mhi_dma_callback;
> + desc->callback_param = &complete;
>
> - cookie = dmaengine_submit(desc);
> - ret = dma_submit_error(cookie);
> - if (ret) {
> - dev_err(dev, "Failed to do DMA submit\n");
> - goto err_unmap;
> - }
> + cookie = dmaengine_submit(desc);
> + ret = dma_submit_error(cookie);
> + if (ret) {
> + dev_err(dev, "Failed to do DMA submit\n");
> + dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
> + return ret;
> + }
>
> - dma_async_issue_pending(chan);
> - ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
> - if (!ret) {
> - dev_err(dev, "DMA transfer timeout\n");
> - dmaengine_terminate_sync(chan);
> - ret = -ETIMEDOUT;
> + dma_async_issue_pending(chan);
> + ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
> + if (!ret) {
> + dev_err(dev, "DMA transfer timeout\n");
> + dmaengine_terminate_sync(chan);
> + ret = -ETIMEDOUT;
return -ETIMEDOUT;
Same issue with logic change to no unmap any more.
> + }
> }
> -
> -err_unmap:
> - dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
> -err_unlock:
> - mutex_unlock(&epf_mhi->lock);
> -
> return ret;
return 0; if no way to get here with an error set
> }
>
> @@ -497,67 +487,59 @@ static int pci_epf_mhi_edma_read_async(struct mhi_ep_cntrl *mhi_cntrl,
> dma_addr_t dst_addr;
> int ret;
>
> - mutex_lock(&epf_mhi->lock);
> + scoped_guard(mutex, &epf_mhi->lock) {
> + config.direction = DMA_DEV_TO_MEM;
> + config.src_addr = buf_info->host_addr;
>
> - config.direction = DMA_DEV_TO_MEM;
> - config.src_addr = buf_info->host_addr;
> + ret = dmaengine_slave_config(chan, &config);
> + if (ret) {
> + dev_err(dev, "Failed to configure DMA channel\n");
> + return ret;
> + }
>
> - ret = dmaengine_slave_config(chan, &config);
> - if (ret) {
> - dev_err(dev, "Failed to configure DMA channel\n");
> - goto err_unlock;
> - }
> + dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
> + DMA_FROM_DEVICE);
> + ret = dma_mapping_error(dma_dev, dst_addr);
> + if (ret) {
> + dev_err(dev, "Failed to map remote memory\n");
> + return ret;
> + }
>
> - dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
> - DMA_FROM_DEVICE);
> - ret = dma_mapping_error(dma_dev, dst_addr);
> - if (ret) {
> - dev_err(dev, "Failed to map remote memory\n");
> - goto err_unlock;
> - }
> + desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
> + DMA_DEV_TO_MEM,
> + DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
> + if (!desc) {
> + dev_err(dev, "Failed to prepare DMA\n");
> + dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
> + return -EIO;
> + }
>
> - desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
> - DMA_DEV_TO_MEM,
> - DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
> - if (!desc) {
> - dev_err(dev, "Failed to prepare DMA\n");
> - ret = -EIO;
> - goto err_unmap;
> - }
> + transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
> + if (!transfer) {
> + dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
> + return -ENOMEM;
> + }
>
> - transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
> - if (!transfer) {
> - ret = -ENOMEM;
> - goto err_unmap;
> - }
> + transfer->epf_mhi = epf_mhi;
> + transfer->paddr = dst_addr;
> + transfer->size = buf_info->size;
> + transfer->dir = DMA_FROM_DEVICE;
> + memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));
>
> - transfer->epf_mhi = epf_mhi;
> - transfer->paddr = dst_addr;
> - transfer->size = buf_info->size;
> - transfer->dir = DMA_FROM_DEVICE;
> - memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));
> + desc->callback = pci_epf_mhi_dma_async_callback;
> + desc->callback_param = transfer;
>
> - desc->callback = pci_epf_mhi_dma_async_callback;
> - desc->callback_param = transfer;
> + cookie = dmaengine_submit(desc);
> + ret = dma_submit_error(cookie);
> + if (ret) {
> + dev_err(dev, "Failed to do DMA submit\n");
> + kfree(transfer);
> + dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
> + return ret;
> + }
>
> - cookie = dmaengine_submit(desc);
> - ret = dma_submit_error(cookie);
> - if (ret) {
> - dev_err(dev, "Failed to do DMA submit\n");
> - goto err_free_transfer;
> + dma_async_issue_pending(chan);
> }
> -
> - dma_async_issue_pending(chan);
> -
> - goto err_unlock;
> -
> -err_free_transfer:
> - kfree(transfer);
> -err_unmap:
> - dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
> -err_unlock:
> - mutex_unlock(&epf_mhi->lock);
> -
> return ret;
> }
>
> @@ -576,67 +558,59 @@ static int pci_epf_mhi_edma_write_async(struct mhi_ep_cntrl *mhi_cntrl,
> dma_addr_t src_addr;
> int ret;
>
> - mutex_lock(&epf_mhi->lock);
> + scoped_guard(mutex, &epf_mhi->lock) {
> + config.direction = DMA_MEM_TO_DEV;
> + config.dst_addr = buf_info->host_addr;
>
> - config.direction = DMA_MEM_TO_DEV;
> - config.dst_addr = buf_info->host_addr;
> + ret = dmaengine_slave_config(chan, &config);
> + if (ret) {
> + dev_err(dev, "Failed to configure DMA channel\n");
> + return ret;
> + }
>
> - ret = dmaengine_slave_config(chan, &config);
> - if (ret) {
> - dev_err(dev, "Failed to configure DMA channel\n");
> - goto err_unlock;
> - }
> + src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
> + DMA_TO_DEVICE);
> + ret = dma_mapping_error(dma_dev, src_addr);
> + if (ret) {
> + dev_err(dev, "Failed to map remote memory\n");
> + return ret;
> + }
>
> - src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
> - DMA_TO_DEVICE);
> - ret = dma_mapping_error(dma_dev, src_addr);
> - if (ret) {
> - dev_err(dev, "Failed to map remote memory\n");
> - goto err_unlock;
> - }
> + desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
> + DMA_MEM_TO_DEV,
> + DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
> + if (!desc) {
> + dev_err(dev, "Failed to prepare DMA\n");
> + dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
> + return -EIO;
> + }
>
> - desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
> - DMA_MEM_TO_DEV,
> - DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
> - if (!desc) {
> - dev_err(dev, "Failed to prepare DMA\n");
> - ret = -EIO;
> - goto err_unmap;
> - }
> + transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
> + if (!transfer) {
> + dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
> + return -ENOMEM;
> + }
>
> - transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
> - if (!transfer) {
> - ret = -ENOMEM;
> - goto err_unmap;
> - }
> + transfer->epf_mhi = epf_mhi;
> + transfer->paddr = src_addr;
> + transfer->size = buf_info->size;
> + transfer->dir = DMA_TO_DEVICE;
> + memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));
>
> - transfer->epf_mhi = epf_mhi;
> - transfer->paddr = src_addr;
> - transfer->size = buf_info->size;
> - transfer->dir = DMA_TO_DEVICE;
> - memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));
> + desc->callback = pci_epf_mhi_dma_async_callback;
> + desc->callback_param = transfer;
>
> - desc->callback = pci_epf_mhi_dma_async_callback;
> - desc->callback_param = transfer;
> + cookie = dmaengine_submit(desc);
> + ret = dma_submit_error(cookie);
> + if (ret) {
> + dev_err(dev, "Failed to do DMA submit\n");
> + kfree(transfer);
> + dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
> + return ret;
> + }
>
> - cookie = dmaengine_submit(desc);
> - ret = dma_submit_error(cookie);
> - if (ret) {
> - dev_err(dev, "Failed to do DMA submit\n");
> - goto err_free_transfer;
> + dma_async_issue_pending(chan);
> }
> -
> - dma_async_issue_pending(chan);
> -
> - goto err_unlock;
> -
> -err_free_transfer:
> - kfree(transfer);
> -err_unmap:
> - dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
> -err_unlock:
> - mutex_unlock(&epf_mhi->lock);
> -
> return ret;
> }
>
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [PATCH 2/2] PCI: endpoint: Use scoped_guard for manual mutex lock/unlock
2025-05-01 15:56 ` [PATCH 2/2] PCI: endpoint: Use scoped_guard for manual mutex lock/unlock Erick Karanja
@ 2025-05-02 8:43 ` Jonathan Cameron
0 siblings, 0 replies; 5+ messages in thread
From: Jonathan Cameron @ 2025-05-02 8:43 UTC (permalink / raw)
To: Erick Karanja
Cc: manivannan.sadhasivam, kw, kishon, bhelgaas, linux-pci,
linux-kernel, julia.lawall
On Thu, 1 May 2025 18:56:12 +0300
Erick Karanja <karanja99erick@gmail.com> wrote:
> This refactor replaces manual mutex lock/unlock with scoped_guard()
> in places where early exits use goto. Using scoped_guard()
> avoids error-prone unlock paths and simplifies control flow.
>
> Signed-off-by: Erick Karanja <karanja99erick@gmail.com>
> ---
> drivers/pci/endpoint/pci-epc-core.c | 53 +++++++++++++----------------
> 1 file changed, 24 insertions(+), 29 deletions(-)
>
> diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
> index beabea00af91..3f3ff36fa8ab 100644
> --- a/drivers/pci/endpoint/pci-epc-core.c
> +++ b/drivers/pci/endpoint/pci-epc-core.c
> @@ -709,7 +709,6 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
> {
> struct list_head *list;
> u32 func_no;
> - int ret = 0;
>
> if (IS_ERR_OR_NULL(epc) || epf->is_vf)
> return -EINVAL;
> @@ -720,36 +719,32 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
> if (type == SECONDARY_INTERFACE && epf->sec_epc)
> return -EBUSY;
>
> - mutex_lock(&epc->list_lock);
> - func_no = find_first_zero_bit(&epc->function_num_map,
> - BITS_PER_LONG);
> - if (func_no >= BITS_PER_LONG) {
> - ret = -EINVAL;
> - goto ret;
> - }
> -
> - if (func_no > epc->max_functions - 1) {
> - dev_err(&epc->dev, "Exceeding max supported Function Number\n");
> - ret = -EINVAL;
> - goto ret;
> + scoped_guard(mutex, &epc->list_lock) {
This one is better, but using
guard(mutex)(&epc->list_lock);
Is going to make for an easier to read patch and lower indent etc.
Unless there is some subsystem related reason that scoped_guard() is
preferred then I'd go that way.
> + func_no = find_first_zero_bit(&epc->function_num_map,
> + BITS_PER_LONG);
> + if (func_no >= BITS_PER_LONG)
> + return -EINVAL;
> +
> + if (func_no > epc->max_functions - 1) {
> + dev_err(&epc->dev, "Exceeding max supported Function Number\n");
> + return -EINVAL;
> + }
> +
> + set_bit(func_no, &epc->function_num_map);
> + if (type == PRIMARY_INTERFACE) {
> + epf->func_no = func_no;
> + epf->epc = epc;
> + list = &epf->list;
> + } else {
> + epf->sec_epc_func_no = func_no;
> + epf->sec_epc = epc;
> + list = &epf->sec_epc_list;
> + }
> +
> + list_add_tail(list, &epc->pci_epf);
> }
>
> - set_bit(func_no, &epc->function_num_map);
> - if (type == PRIMARY_INTERFACE) {
> - epf->func_no = func_no;
> - epf->epc = epc;
> - list = &epf->list;
> - } else {
> - epf->sec_epc_func_no = func_no;
> - epf->sec_epc = epc;
> - list = &epf->sec_epc_list;
> - }
> -
> - list_add_tail(list, &epc->pci_epf);
> -ret:
> - mutex_unlock(&epc->list_lock);
> -
> - return ret;
> + return 0;
> }
> EXPORT_SYMBOL_GPL(pci_epc_add_epf);
>
^ permalink raw reply [flat|nested] 5+ messages in thread
end of thread, other threads:[~2025-05-02 8:43 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2025-05-01 15:56 [PATCH 0/2] Use scoped_guard to safely manage mutex locking Erick Karanja
2025-05-01 15:56 ` [PATCH 1/2] PCI: endpoint: Replace manual mutex handling with scoped_guard() Erick Karanja
2025-05-02 8:41 ` Jonathan Cameron
2025-05-01 15:56 ` [PATCH 2/2] PCI: endpoint: Use scoped_guard for manual mutex lock/unlock Erick Karanja
2025-05-02 8:43 ` Jonathan Cameron
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox