linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] perf/x86: Use guard(mutex) instead of mutex_lock() to simplify code
@ 2025-08-29 11:48 Liao Yuanhong
  2025-09-02 16:30 ` Liang, Kan
  0 siblings, 1 reply; 2+ messages in thread
From: Liao Yuanhong @ 2025-08-29 11:48 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
	Namhyung Kim, Mark Rutland, Alexander Shishkin, Jiri Olsa,
	Ian Rogers, Adrian Hunter, Liang, Kan, Thomas Gleixner,
	Borislav Petkov, Dave Hansen,
	maintainer:X86 ARCHITECTURE (32-BIT AND 64-BIT), H. Peter Anvin,
	open list:PERFORMANCE EVENTS SUBSYSTEM,
	open list:PERFORMANCE EVENTS SUBSYSTEM
  Cc: Liao Yuanhong

Use guard(mutex) and scoped_guard() instead of mutex_lock()/mutex_unlock()
pairs. This simplifies the error handling to just return in case of error.
The fail_unlock: label is no longer needed, so remove it.

Signed-off-by: Liao Yuanhong <liaoyuanhong@vivo.com>
---
 arch/x86/events/core.c | 21 +++++++--------------
 1 file changed, 7 insertions(+), 14 deletions(-)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 745caa6c15a3..107bed5c9d71 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -411,7 +411,7 @@ int x86_reserve_hardware(void)
 	int err = 0;
 
 	if (!atomic_inc_not_zero(&pmc_refcount)) {
-		mutex_lock(&pmc_reserve_mutex);
+		guard(mutex)(&pmc_reserve_mutex);
 		if (atomic_read(&pmc_refcount) == 0) {
 			if (!reserve_pmc_hardware()) {
 				err = -EBUSY;
@@ -422,7 +422,6 @@ int x86_reserve_hardware(void)
 		}
 		if (!err)
 			atomic_inc(&pmc_refcount);
-		mutex_unlock(&pmc_reserve_mutex);
 	}
 
 	return err;
@@ -444,8 +443,6 @@ void x86_release_hardware(void)
  */
 int x86_add_exclusive(unsigned int what)
 {
-	int i;
-
 	/*
 	 * When lbr_pt_coexist we allow PT to coexist with either LBR or BTS.
 	 * LBR and BTS are still mutually exclusive.
@@ -454,22 +451,18 @@ int x86_add_exclusive(unsigned int what)
 		goto out;
 
 	if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
-		mutex_lock(&pmc_reserve_mutex);
-		for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
-			if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
-				goto fail_unlock;
+		scoped_guard(mutex, &pmc_reserve_mutex) {
+			for (int i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
+				if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
+					return -EBUSY;
+			}
+			atomic_inc(&x86_pmu.lbr_exclusive[what]);
 		}
-		atomic_inc(&x86_pmu.lbr_exclusive[what]);
-		mutex_unlock(&pmc_reserve_mutex);
 	}
 
 out:
 	atomic_inc(&active_events);
 	return 0;
-
-fail_unlock:
-	mutex_unlock(&pmc_reserve_mutex);
-	return -EBUSY;
 }
 
 void x86_del_exclusive(unsigned int what)
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 2+ messages in thread

* Re: [PATCH] perf/x86: Use guard(mutex) instead of mutex_lock() to simplify code
  2025-08-29 11:48 [PATCH] perf/x86: Use guard(mutex) instead of mutex_lock() to simplify code Liao Yuanhong
@ 2025-09-02 16:30 ` Liang, Kan
  0 siblings, 0 replies; 2+ messages in thread
From: Liang, Kan @ 2025-09-02 16:30 UTC (permalink / raw)
  To: Liao Yuanhong, Peter Zijlstra, Ingo Molnar,
	Arnaldo Carvalho de Melo, Namhyung Kim, Mark Rutland,
	Alexander Shishkin, Jiri Olsa, Ian Rogers, Adrian Hunter,
	Thomas Gleixner, Borislav Petkov, Dave Hansen,
	maintainer:X86 ARCHITECTURE (32-BIT AND 64-BIT), H. Peter Anvin,
	open list:PERFORMANCE EVENTS SUBSYSTEM,
	open list:PERFORMANCE EVENTS SUBSYSTEM



On 2025-08-29 4:48 a.m., Liao Yuanhong wrote:
> Using guard(mutex) and scoped_guard() instead of mutex_lock/mutex_unlock
> pair. Simplifies the error handling to just return in case of error. No
> need for the fail_unlock: label anymore so remove it.
> 
> Signed-off-by: Liao Yuanhong <liaoyuanhong@vivo.com>
> ---
>  arch/x86/events/core.c | 21 +++++++--------------
>  1 file changed, 7 insertions(+), 14 deletions(-)
> 
> diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
> index 745caa6c15a3..107bed5c9d71 100644
> --- a/arch/x86/events/core.c
> +++ b/arch/x86/events/core.c
> @@ -411,7 +411,7 @@ int x86_reserve_hardware(void)
>  	int err = 0;
>  
>  	if (!atomic_inc_not_zero(&pmc_refcount)) {
> -		mutex_lock(&pmc_reserve_mutex);
> +		guard(mutex)(&pmc_reserve_mutex);

Shouldn't it be a scoped_guard() as well?

Thanks,
Kan

>  		if (atomic_read(&pmc_refcount) == 0) {
>  			if (!reserve_pmc_hardware()) {
>  				err = -EBUSY;
> @@ -422,7 +422,6 @@ int x86_reserve_hardware(void)
>  		}
>  		if (!err)
>  			atomic_inc(&pmc_refcount);
> -		mutex_unlock(&pmc_reserve_mutex);
>  	}
>  
>  	return err;
> @@ -444,8 +443,6 @@ void x86_release_hardware(void)
>   */
>  int x86_add_exclusive(unsigned int what)
>  {
> -	int i;
> -
>  	/*
>  	 * When lbr_pt_coexist we allow PT to coexist with either LBR or BTS.
>  	 * LBR and BTS are still mutually exclusive.
> @@ -454,22 +451,18 @@ int x86_add_exclusive(unsigned int what)
>  		goto out;
>  
>  	if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
> -		mutex_lock(&pmc_reserve_mutex);
> -		for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
> -			if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
> -				goto fail_unlock;
> +		scoped_guard(mutex, &pmc_reserve_mutex) {
> +			for (int i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
> +				if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
> +					return -EBUSY;
> +			}
> +			atomic_inc(&x86_pmu.lbr_exclusive[what]);
>  		}
> -		atomic_inc(&x86_pmu.lbr_exclusive[what]);
> -		mutex_unlock(&pmc_reserve_mutex);
>  	}
>  
>  out:
>  	atomic_inc(&active_events);
>  	return 0;
> -
> -fail_unlock:
> -	mutex_unlock(&pmc_reserve_mutex);
> -	return -EBUSY;
>  }
>  
>  void x86_del_exclusive(unsigned int what)


^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2025-09-02 16:30 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-08-29 11:48 [PATCH] perf/x86: Use guard(mutex) instead of mutex_lock() to simplify code Liao Yuanhong
2025-09-02 16:30 ` Liang, Kan

This is a public inbox; see mirroring instructions
for how to clone and mirror all data and code used for this inbox,
as well as URLs for NNTP newsgroup(s).