kernel-testers.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* Fix dead lock in cpufreq for CPU hotplug and suspend for 2.6.30.stable
       [not found] ` <20090623193215.GA31374-X9Un+BFzKDI@public.gmane.org>
@ 2009-06-25 14:01   ` Thomas Renninger
       [not found]     ` <1245938485-12663-1-git-send-email-trenn-l3A5Bk7waGM@public.gmane.org>
  0 siblings, 1 reply; 15+ messages in thread
From: Thomas Renninger @ 2009-06-25 14:01 UTC (permalink / raw)
  To: kernel-soeCzev1AWYdnm+yROfE0A
  Cc: cpufreq-u79uwXL29TY76Z2rM5mHXA,
	linux-kernel-u79uwXL29TY76Z2rM5mHXA, mingo-X9Un+BFzKDI,
	rjw-KKrjLPT3xs0, hidave.darkstar-Re5JQEeQqe8AvxtiuMwx3w,
	penberg-bbCR+/B0CizivPeTLB3BmA,
	kernel-testers-u79uwXL29TY76Z2rM5mHXA,
	davej-H+wXaHxf7aLQT0dZR+AlfA,
	mathieu.desnoyers-scC8bbJcJLCw5LPnMra/2Q

This is about the deadlocks happening since cancel_delayed_work_sync()
got added to the ondemand/conservative governors.

I truncated the CC list a bit to the most important people and mailing lists.

The patches are intended for 2.6.30 stable, but the two patches together have
not been tested yet. If someone could verify that this works now, that
would be great.

I first wanted to provide something for .31 and then backport, but
it wouldn't patch clean anymore anyway and I don't want to wait
any longer.

Rafael: These should solve the regression bug:
http://bugzilla.kernel.org/show_bug.cgi?id=13475


^ permalink raw reply	[flat|nested] 15+ messages in thread

* [PATCH 1/2] CPUFREQ: Remove unneeded dbs_mutexes from ondemand and conservative governors
       [not found] <20090623193215.GA31374@elte.hu>
       [not found] ` <20090623193215.GA31374-X9Un+BFzKDI@public.gmane.org>
@ 2009-06-25 14:01 ` Thomas Renninger
       [not found]   ` <1245938485-12663-2-git-send-email-trenn-l3A5Bk7waGM@public.gmane.org>
  2009-06-25 14:01 ` [PATCH 2/2] remove rwsem lock from CPUFREQ_GOV_STOP call (second call site) Thomas Renninger
  2 siblings, 1 reply; 15+ messages in thread
From: Thomas Renninger @ 2009-06-25 14:01 UTC (permalink / raw)
  To: kernel
  Cc: cpufreq, linux-kernel, mingo, rjw, hidave.darkstar, penberg,
	kernel-testers, davej, mathieu.desnoyers, Thomas Renninger,
	Venkatesh Pallipadi

Comment from Venkatesh:
...
This mutex is just serializing the changes to those variables. I couldn't
think of any functionality issues of not having the lock as such.

-> rip it out.

CC: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Thomas Renninger <trenn@suse.de>
---
 drivers/cpufreq/cpufreq_conservative.c |   61 +++-----------------------------
 drivers/cpufreq/cpufreq_ondemand.c     |   48 +++----------------------
 2 files changed, 10 insertions(+), 99 deletions(-)

diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 7a74d17..6303379 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -18,7 +18,6 @@
 #include <linux/cpu.h>
 #include <linux/jiffies.h>
 #include <linux/kernel_stat.h>
-#include <linux/mutex.h>
 #include <linux/hrtimer.h>
 #include <linux/tick.h>
 #include <linux/ktime.h>
@@ -84,19 +83,6 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
-/*
- * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
- * lock and dbs_mutex. cpu_hotplug lock should always be held before
- * dbs_mutex. If any function that can potentially take cpu_hotplug lock
- * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
- * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
- * is recursive for the same process. -Venki
- * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
- * would deadlock with cancel_delayed_work_sync(), which is needed for proper
- * raceless workqueue teardown.
- */
-static DEFINE_MUTEX(dbs_mutex);
-
 static struct workqueue_struct	*kconservative_wq;
 
 static struct dbs_tuners {
@@ -236,10 +222,7 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
 		return -EINVAL;
 
-	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.sampling_down_factor = input;
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -253,10 +236,7 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 	if (ret != 1)
 		return -EINVAL;
 
-	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -267,16 +247,11 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 
-	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > 100 ||
-			input <= dbs_tuners_ins.down_threshold) {
-		mutex_unlock(&dbs_mutex);
+			input <= dbs_tuners_ins.down_threshold)
 		return -EINVAL;
-	}
 
 	dbs_tuners_ins.up_threshold = input;
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -287,17 +262,12 @@ static ssize_t store_down_threshold(struct cpufreq_policy *unused,
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 
-	mutex_lock(&dbs_mutex);
 	/* cannot be lower than 11 otherwise freq will not fall */
 	if (ret != 1 || input < 11 || input > 100 ||
-			input >= dbs_tuners_ins.up_threshold) {
-		mutex_unlock(&dbs_mutex);
+			input >= dbs_tuners_ins.up_threshold)
 		return -EINVAL;
-	}
 
 	dbs_tuners_ins.down_threshold = input;
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -316,11 +286,9 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	if (input > 1)
 		input = 1;
 
-	mutex_lock(&dbs_mutex);
-	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
-		mutex_unlock(&dbs_mutex);
+	if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */
 		return count;
-	}
+
 	dbs_tuners_ins.ignore_nice = input;
 
 	/* we need to re-evaluate prev_cpu_idle */
@@ -332,8 +300,6 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 		if (dbs_tuners_ins.ignore_nice)
 			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
 	}
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -352,10 +318,7 @@ static ssize_t store_freq_step(struct cpufreq_policy *policy,
 
 	/* no need to test here if freq_step is zero as the user might actually
 	 * want this, they would be crazy though :) */
-	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.freq_step = input;
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -566,13 +529,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (this_dbs_info->enable) /* Already enabled */
 			break;
 
-		mutex_lock(&dbs_mutex);
-
 		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
-		if (rc) {
-			mutex_unlock(&dbs_mutex);
+		if (rc)
 			return rc;
-		}
 
 		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
@@ -612,13 +571,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 					CPUFREQ_TRANSITION_NOTIFIER);
 		}
 		dbs_timer_init(this_dbs_info);
-
-		mutex_unlock(&dbs_mutex);
-
 		break;
 
 	case CPUFREQ_GOV_STOP:
-		mutex_lock(&dbs_mutex);
 		dbs_timer_exit(this_dbs_info);
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
@@ -631,13 +586,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			cpufreq_unregister_notifier(
 					&dbs_cpufreq_notifier_block,
 					CPUFREQ_TRANSITION_NOTIFIER);
-
-		mutex_unlock(&dbs_mutex);
-
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
-		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
@@ -646,8 +597,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
 					policy->min, CPUFREQ_RELATION_L);
-		mutex_unlock(&dbs_mutex);
-
 		break;
 	}
 	return 0;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index e741c33..d080a48 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -17,7 +17,6 @@
 #include <linux/cpu.h>
 #include <linux/jiffies.h>
 #include <linux/kernel_stat.h>
-#include <linux/mutex.h>
 #include <linux/hrtimer.h>
 #include <linux/tick.h>
 #include <linux/ktime.h>
@@ -91,19 +90,6 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
-/*
- * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
- * lock and dbs_mutex. cpu_hotplug lock should always be held before
- * dbs_mutex. If any function that can potentially take cpu_hotplug lock
- * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
- * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
- * is recursive for the same process. -Venki
- * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
- * would deadlock with cancel_delayed_work_sync(), which is needed for proper
- * raceless workqueue teardown.
- */
-static DEFINE_MUTEX(dbs_mutex);
-
 static struct workqueue_struct	*kondemand_wq;
 
 static struct dbs_tuners {
@@ -269,14 +255,10 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 
-	mutex_lock(&dbs_mutex);
-	if (ret != 1) {
-		mutex_unlock(&dbs_mutex);
+	if (ret != 1)
 		return -EINVAL;
-	}
-	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
-	mutex_unlock(&dbs_mutex);
 
+	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
 	return count;
 }
 
@@ -287,16 +269,11 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 
-	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
-			input < MIN_FREQUENCY_UP_THRESHOLD) {
-		mutex_unlock(&dbs_mutex);
+			input < MIN_FREQUENCY_UP_THRESHOLD)
 		return -EINVAL;
-	}
 
 	dbs_tuners_ins.up_threshold = input;
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -315,11 +292,9 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	if (input > 1)
 		input = 1;
 
-	mutex_lock(&dbs_mutex);
-	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
-		mutex_unlock(&dbs_mutex);
+	if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */
 		return count;
-	}
+
 	dbs_tuners_ins.ignore_nice = input;
 
 	/* we need to re-evaluate prev_cpu_idle */
@@ -332,8 +307,6 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
 
 	}
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -350,10 +323,8 @@ static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
 	if (input > 1000)
 		input = 1000;
 
-	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.powersave_bias = input;
 	ondemand_powersave_bias_init();
-	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -586,13 +557,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (this_dbs_info->enable) /* Already enabled */
 			break;
 
-		mutex_lock(&dbs_mutex);
 		dbs_enable++;
 
 		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
 		if (rc) {
 			dbs_enable--;
-			mutex_unlock(&dbs_mutex);
 			return rc;
 		}
 
@@ -627,28 +596,21 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			dbs_tuners_ins.sampling_rate = def_sampling_rate;
 		}
 		dbs_timer_init(this_dbs_info);
-
-		mutex_unlock(&dbs_mutex);
 		break;
 
 	case CPUFREQ_GOV_STOP:
-		mutex_lock(&dbs_mutex);
 		dbs_timer_exit(this_dbs_info);
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
-		mutex_unlock(&dbs_mutex);
-
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
-		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(this_dbs_info->cur_policy,
 				policy->max, CPUFREQ_RELATION_H);
 		else if (policy->min > this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(this_dbs_info->cur_policy,
 				policy->min, CPUFREQ_RELATION_L);
-		mutex_unlock(&dbs_mutex);
 		break;
 	}
 	return 0;
-- 
1.6.0.2

^ permalink raw reply related	[flat|nested] 15+ messages in thread

* [PATCH 2/2] remove rwsem lock from CPUFREQ_GOV_STOP call (second call site)
       [not found] <20090623193215.GA31374@elte.hu>
       [not found] ` <20090623193215.GA31374-X9Un+BFzKDI@public.gmane.org>
  2009-06-25 14:01 ` [PATCH 1/2] CPUFREQ: Remove unneeded dbs_mutexes from ondemand and conservative governors Thomas Renninger
@ 2009-06-25 14:01 ` Thomas Renninger
  2 siblings, 0 replies; 15+ messages in thread
From: Thomas Renninger @ 2009-06-25 14:01 UTC (permalink / raw)
  To: kernel
  Cc: cpufreq, linux-kernel, mingo, rjw, hidave.darkstar, penberg,
	kernel-testers, davej, mathieu.desnoyers, Thomas Renninger

From: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>

Missed a call site for CPUFREQ_GOV_STOP to remove the rwlock taken around the
teardown. To make a long story short, the rwlock write-lock causes a circular
dependency with cancel_delayed_work_sync(), because the timer handler takes the
write lock.

Note that all callers to __cpufreq_set_policy are taking the rwsem. All sysfs
callers (writers) hold the write rwsem at the earliest sysfs calling stage.

However, the rwlock write-lock is not needed upon governor stop.

Signed-off-by: Thomas Renninger <trenn@suse.de>
Acked-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
---
 drivers/cpufreq/cpufreq.c |   13 ++++++++++++-
 1 files changed, 12 insertions(+), 1 deletions(-)

diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6e2ec0b..7e7ff1b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -61,6 +61,8 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);
  *   are concerned with are online after they get the lock.
  * - Governor routines that can be called in cpufreq hotplug path should not
  *   take this sem as top level hotplug notifier handler takes this.
+ * - Lock should not be held across
+ * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
  */
 static DEFINE_PER_CPU(int, policy_cpu);
 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
@@ -1697,8 +1699,17 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
 			dprintk("governor switch\n");
 
 			/* end old governor */
-			if (data->governor)
+			if (data->governor) {
+				/*
+				 * Need to release the rwsem around governor
+				 * stop due to lock dependency between
+				 * cancel_delayed_work_sync and the read lock
+				 * taken in the delayed work handler.
+				 */
+				unlock_policy_rwsem_write(data->cpu);
 				__cpufreq_governor(data, CPUFREQ_GOV_STOP);
+				lock_policy_rwsem_write(data->cpu);
+			}
 
 			/* start new governor */
 			data->governor = policy->governor;
-- 
1.6.0.2

^ permalink raw reply related	[flat|nested] 15+ messages in thread

* Re: Fix dead lock in cpufreq for CPU hotplug and suspend for 2.6.30.stable
       [not found]     ` <1245938485-12663-1-git-send-email-trenn-l3A5Bk7waGM@public.gmane.org>
@ 2009-06-25 14:06       ` Thomas Renninger
  0 siblings, 0 replies; 15+ messages in thread
From: Thomas Renninger @ 2009-06-25 14:06 UTC (permalink / raw)
  To: kernel-soeCzev1AWYdnm+yROfE0A
  Cc: cpufreq-u79uwXL29TY76Z2rM5mHXA,
	linux-kernel-u79uwXL29TY76Z2rM5mHXA, mingo-X9Un+BFzKDI,
	rjw-KKrjLPT3xs0, hidave.darkstar-Re5JQEeQqe8AvxtiuMwx3w,
	penberg-bbCR+/B0CizivPeTLB3BmA,
	kernel-testers-u79uwXL29TY76Z2rM5mHXA,
	davej-H+wXaHxf7aLQT0dZR+AlfA,
	mathieu.desnoyers-scC8bbJcJLCw5LPnMra/2Q

On Thursday 25 June 2009 16:01:23 Thomas Renninger wrote:
...
Arggh. I mixed up kernel-soeCzev1AWYdnm+yROfE0A@public.gmane.org with stable-DgEjT+Ai2yi4UlQgPVntAg@public.gmane.org
I bounced them there, please correct the address if you answer...

    Thomas

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH 1/2] CPUFREQ: Remove unneeded dbs_mutexes from ondemand and conservative governors
       [not found]   ` <1245938485-12663-2-git-send-email-trenn-l3A5Bk7waGM@public.gmane.org>
@ 2009-06-25 14:25     ` Mathieu Desnoyers
  2009-06-25 15:03       ` Pallipadi, Venkatesh
  2009-06-25 22:17       ` Thomas Renninger
  2009-06-30  6:33     ` Pavel Machek
  2009-06-30 22:58     ` [stable] " Greg KH
  2 siblings, 2 replies; 15+ messages in thread
From: Mathieu Desnoyers @ 2009-06-25 14:25 UTC (permalink / raw)
  To: Thomas Renninger
  Cc: kernel-soeCzev1AWYdnm+yROfE0A, cpufreq-u79uwXL29TY76Z2rM5mHXA,
	linux-kernel-u79uwXL29TY76Z2rM5mHXA, mingo-X9Un+BFzKDI,
	rjw-KKrjLPT3xs0, hidave.darkstar-Re5JQEeQqe8AvxtiuMwx3w,
	penberg-bbCR+/B0CizivPeTLB3BmA,
	kernel-testers-u79uwXL29TY76Z2rM5mHXA,
	davej-H+wXaHxf7aLQT0dZR+AlfA, Venkatesh Pallipadi

* Thomas Renninger (trenn-l3A5Bk7waGM@public.gmane.org) wrote:
> Comment from Venkatesh:
> ...
> This mutex is just serializing the changes to those variables. I could't
> think of any functionality issues of not having the lock as such.
> 
> -> rip it out.
> 
> CC: Venkatesh Pallipadi <venkatesh.pallipadi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> Signed-off-by: Thomas Renninger <trenn-l3A5Bk7waGM@public.gmane.org>
> ---
>  drivers/cpufreq/cpufreq_conservative.c |   61 +++-----------------------------
>  drivers/cpufreq/cpufreq_ondemand.c     |   48 +++----------------------
>  2 files changed, 10 insertions(+), 99 deletions(-)
> 
> diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
> index 7a74d17..6303379 100644
> --- a/drivers/cpufreq/cpufreq_conservative.c
> +++ b/drivers/cpufreq/cpufreq_conservative.c
> @@ -18,7 +18,6 @@
>  #include <linux/cpu.h>
>  #include <linux/jiffies.h>
>  #include <linux/kernel_stat.h>
> -#include <linux/mutex.h>
>  #include <linux/hrtimer.h>
>  #include <linux/tick.h>
>  #include <linux/ktime.h>
> @@ -84,19 +83,6 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
>  
>  static unsigned int dbs_enable;	/* number of CPUs using this policy */
>  
> -/*
> - * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
> - * lock and dbs_mutex. cpu_hotplug lock should always be held before
> - * dbs_mutex. If any function that can potentially take cpu_hotplug lock
> - * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
> - * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
> - * is recursive for the same process. -Venki
> - * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
> - * would deadlock with cancel_delayed_work_sync(), which is needed for proper
> - * raceless workqueue teardown.
> - */
> -static DEFINE_MUTEX(dbs_mutex);
> -
>  static struct workqueue_struct	*kconservative_wq;
>  
>  static struct dbs_tuners {
> @@ -236,10 +222,7 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
>  	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
>  		return -EINVAL;
>  
> -	mutex_lock(&dbs_mutex);
>  	dbs_tuners_ins.sampling_down_factor = input;
> -	mutex_unlock(&dbs_mutex);
> -
>  	return count;
>  }
>  
> @@ -253,10 +236,7 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
>  	if (ret != 1)
>  		return -EINVAL;
>  
> -	mutex_lock(&dbs_mutex);
>  	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
> -	mutex_unlock(&dbs_mutex);
> -
>  	return count;
>  }
>  
> @@ -267,16 +247,11 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
>  	int ret;
>  	ret = sscanf(buf, "%u", &input);
>  
> -	mutex_lock(&dbs_mutex);
>  	if (ret != 1 || input > 100 ||
> -			input <= dbs_tuners_ins.down_threshold) {
> -		mutex_unlock(&dbs_mutex);
> +			input <= dbs_tuners_ins.down_threshold)
>  		return -EINVAL;
> -	}
>  
>  	dbs_tuners_ins.up_threshold = input;
> -	mutex_unlock(&dbs_mutex);

Here, for instance, there might be a problem if down_threshold is
changed concurrently with a store_up_threshold() call. See that there is
a test before the modification, and we need the mutex there for it to be
consistent.

> -
>  	return count;
>  }
>  
> @@ -287,17 +262,12 @@ static ssize_t store_down_threshold(struct cpufreq_policy *unused,
>  	int ret;
>  	ret = sscanf(buf, "%u", &input);
>  
> -	mutex_lock(&dbs_mutex);
>  	/* cannot be lower than 11 otherwise freq will not fall */
>  	if (ret != 1 || input < 11 || input > 100 ||
> -			input >= dbs_tuners_ins.up_threshold) {
> -		mutex_unlock(&dbs_mutex);
> +			input >= dbs_tuners_ins.up_threshold)
>  		return -EINVAL;
> -	}
>  
>  	dbs_tuners_ins.down_threshold = input;
> -	mutex_unlock(&dbs_mutex);
> -
>  	return count;
>  }
>  
> @@ -316,11 +286,9 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
>  	if (input > 1)
>  		input = 1;
>  
> -	mutex_lock(&dbs_mutex);
> -	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
> -		mutex_unlock(&dbs_mutex);
> +	if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */
>  		return count;
> -	}
> +
>  	dbs_tuners_ins.ignore_nice = input;
>  
>  	/* we need to re-evaluate prev_cpu_idle */
> @@ -332,8 +300,6 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
>  		if (dbs_tuners_ins.ignore_nice)
>  			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
>  	}
> -	mutex_unlock(&dbs_mutex);
> -
>  	return count;
>  }
>  
> @@ -352,10 +318,7 @@ static ssize_t store_freq_step(struct cpufreq_policy *policy,
>  
>  	/* no need to test here if freq_step is zero as the user might actually
>  	 * want this, they would be crazy though :) */
> -	mutex_lock(&dbs_mutex);
>  	dbs_tuners_ins.freq_step = input;
> -	mutex_unlock(&dbs_mutex);
> -
>  	return count;
>  }
>  
> @@ -566,13 +529,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,

Hrm, this is where we want the mutexes removed, but I fear this is
creating a race between sysfs_create_group (sysfs file creation) and
policy initialization...

I'm not convinced this mutex is not needed.

Mathieu

>  		if (this_dbs_info->enable) /* Already enabled */
>  			break;
>  
> -		mutex_lock(&dbs_mutex);
> -
>  		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
> -		if (rc) {
> -			mutex_unlock(&dbs_mutex);
> +		if (rc)
>  			return rc;
> -		}
>  
>  		for_each_cpu(j, policy->cpus) {
>  			struct cpu_dbs_info_s *j_dbs_info;
> @@ -612,13 +571,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
>  					CPUFREQ_TRANSITION_NOTIFIER);
>  		}
>  		dbs_timer_init(this_dbs_info);
> -
> -		mutex_unlock(&dbs_mutex);
> -
>  		break;
>  
>  	case CPUFREQ_GOV_STOP:
> -		mutex_lock(&dbs_mutex);
>  		dbs_timer_exit(this_dbs_info);
>  		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
>  		dbs_enable--;
> @@ -631,13 +586,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
>  			cpufreq_unregister_notifier(
>  					&dbs_cpufreq_notifier_block,
>  					CPUFREQ_TRANSITION_NOTIFIER);
> -
> -		mutex_unlock(&dbs_mutex);
> -
>  		break;
>  
>  	case CPUFREQ_GOV_LIMITS:
> -		mutex_lock(&dbs_mutex);
>  		if (policy->max < this_dbs_info->cur_policy->cur)
>  			__cpufreq_driver_target(
>  					this_dbs_info->cur_policy,
> @@ -646,8 +597,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
>  			__cpufreq_driver_target(
>  					this_dbs_info->cur_policy,
>  					policy->min, CPUFREQ_RELATION_L);
> -		mutex_unlock(&dbs_mutex);
> -
>  		break;
>  	}
>  	return 0;
> diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
> index e741c33..d080a48 100644
> --- a/drivers/cpufreq/cpufreq_ondemand.c
> +++ b/drivers/cpufreq/cpufreq_ondemand.c
> @@ -17,7 +17,6 @@
>  #include <linux/cpu.h>
>  #include <linux/jiffies.h>
>  #include <linux/kernel_stat.h>
> -#include <linux/mutex.h>
>  #include <linux/hrtimer.h>
>  #include <linux/tick.h>
>  #include <linux/ktime.h>
> @@ -91,19 +90,6 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
>  
>  static unsigned int dbs_enable;	/* number of CPUs using this policy */
>  
> -/*
> - * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
> - * lock and dbs_mutex. cpu_hotplug lock should always be held before
> - * dbs_mutex. If any function that can potentially take cpu_hotplug lock
> - * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
> - * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
> - * is recursive for the same process. -Venki
> - * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
> - * would deadlock with cancel_delayed_work_sync(), which is needed for proper
> - * raceless workqueue teardown.
> - */
> -static DEFINE_MUTEX(dbs_mutex);
> -
>  static struct workqueue_struct	*kondemand_wq;
>  
>  static struct dbs_tuners {
> @@ -269,14 +255,10 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
>  	int ret;
>  	ret = sscanf(buf, "%u", &input);
>  
> -	mutex_lock(&dbs_mutex);
> -	if (ret != 1) {
> -		mutex_unlock(&dbs_mutex);
> +	if (ret != 1)
>  		return -EINVAL;
> -	}
> -	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
> -	mutex_unlock(&dbs_mutex);
>  
> +	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
>  	return count;
>  }
>  
> @@ -287,16 +269,11 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
>  	int ret;
>  	ret = sscanf(buf, "%u", &input);
>  
> -	mutex_lock(&dbs_mutex);
>  	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
> -			input < MIN_FREQUENCY_UP_THRESHOLD) {
> -		mutex_unlock(&dbs_mutex);
> +			input < MIN_FREQUENCY_UP_THRESHOLD)
>  		return -EINVAL;
> -	}
>  
>  	dbs_tuners_ins.up_threshold = input;
> -	mutex_unlock(&dbs_mutex);
> -
>  	return count;
>  }
>  
> @@ -315,11 +292,9 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
>  	if (input > 1)
>  		input = 1;
>  
> -	mutex_lock(&dbs_mutex);
> -	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
> -		mutex_unlock(&dbs_mutex);
> +	if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */
>  		return count;
> -	}
> +
>  	dbs_tuners_ins.ignore_nice = input;
>  
>  	/* we need to re-evaluate prev_cpu_idle */
> @@ -332,8 +307,6 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
>  			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
>  
>  	}
> -	mutex_unlock(&dbs_mutex);
> -
>  	return count;
>  }
>  
> @@ -350,10 +323,8 @@ static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
>  	if (input > 1000)
>  		input = 1000;
>  
> -	mutex_lock(&dbs_mutex);
>  	dbs_tuners_ins.powersave_bias = input;
>  	ondemand_powersave_bias_init();
> -	mutex_unlock(&dbs_mutex);
>  
>  	return count;
>  }
> @@ -586,13 +557,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
>  		if (this_dbs_info->enable) /* Already enabled */
>  			break;
>  
> -		mutex_lock(&dbs_mutex);
>  		dbs_enable++;
>  
>  		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
>  		if (rc) {
>  			dbs_enable--;
> -			mutex_unlock(&dbs_mutex);
>  			return rc;
>  		}
>  
> @@ -627,28 +596,21 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
>  			dbs_tuners_ins.sampling_rate = def_sampling_rate;
>  		}
>  		dbs_timer_init(this_dbs_info);
> -
> -		mutex_unlock(&dbs_mutex);
>  		break;
>  
>  	case CPUFREQ_GOV_STOP:
> -		mutex_lock(&dbs_mutex);
>  		dbs_timer_exit(this_dbs_info);
>  		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
>  		dbs_enable--;
> -		mutex_unlock(&dbs_mutex);
> -
>  		break;
>  
>  	case CPUFREQ_GOV_LIMITS:
> -		mutex_lock(&dbs_mutex);
>  		if (policy->max < this_dbs_info->cur_policy->cur)
>  			__cpufreq_driver_target(this_dbs_info->cur_policy,
>  				policy->max, CPUFREQ_RELATION_H);
>  		else if (policy->min > this_dbs_info->cur_policy->cur)
>  			__cpufreq_driver_target(this_dbs_info->cur_policy,
>  				policy->min, CPUFREQ_RELATION_L);
> -		mutex_unlock(&dbs_mutex);
>  		break;
>  	}
>  	return 0;
> -- 
> 1.6.0.2
> 

-- 
Mathieu Desnoyers
OpenPGP key fingerprint: 8CD5 52C3 8E3C 4140 715F  BA06 3F25 A8FE 3BAE 9A68

^ permalink raw reply	[flat|nested] 15+ messages in thread

* RE: [PATCH 1/2] CPUFREQ: Remove unneeded dbs_mutexes from ondemand and conservative governors
  2009-06-25 14:25     ` Mathieu Desnoyers
@ 2009-06-25 15:03       ` Pallipadi, Venkatesh
  2009-06-25 22:17       ` Thomas Renninger
  1 sibling, 0 replies; 15+ messages in thread
From: Pallipadi, Venkatesh @ 2009-06-25 15:03 UTC (permalink / raw)
  To: Mathieu Desnoyers, Thomas Renninger
  Cc: kernel-soeCzev1AWYdnm+yROfE0A@public.gmane.org,
	cpufreq-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
	linux-kernel-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
	mingo-X9Un+BFzKDI@public.gmane.org,
	rjw-KKrjLPT3xs0@public.gmane.org,
	hidave.darkstar-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org,
	penberg-bbCR+/B0CizivPeTLB3BmA@public.gmane.org,
	kernel-testers-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
	davej-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org

 

>-----Original Message-----
>From: Mathieu Desnoyers [mailto:mathieu.desnoyers-scC8bbJcJLCw5LPnMra/2Q@public.gmane.org] 
>Sent: Thursday, June 25, 2009 7:26 AM
>To: Thomas Renninger
>Cc: kernel-soeCzev1AWYdnm+yROfE0A@public.gmane.org; cpufreq-u79uwXL29TY76Z2rM5mHXA@public.gmane.org; 
>linux-kernel-u79uwXL29TY76Z2rM5mHXA@public.gmane.org; mingo-X9Un+BFzKDI@public.gmane.org; rjw-KKrjLPT3xs0@public.gmane.org; 
>hidave.darkstar-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org; penberg-bbCR+/B0CizivPeTLB3BmA@public.gmane.org; 
>kernel-testers-u79uwXL29TY76Z2rM5mHXA@public.gmane.org; davej-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org; Pallipadi, Venkatesh
>Subject: Re: [PATCH 1/2] CPUFREQ: Remove unneeded dbs_mutexes 
>from ondemand and conservative governors
>
>* Thomas Renninger (trenn-l3A5Bk7waGM@public.gmane.org) wrote:
>> Comment from Venkatesh:
>> ...
>> This mutex is just serializing the changes to those 
>variables. I could't
>> think of any functionality issues of not having the lock as such.
>> 
>> -> rip it out.
>> 
>> CC: Venkatesh Pallipadi <venkatesh.pallipadi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
>> Signed-off-by: Thomas Renninger <trenn-l3A5Bk7waGM@public.gmane.org>
>> ---
>>  drivers/cpufreq/cpufreq_conservative.c |   61 
>+++-----------------------------
>>  drivers/cpufreq/cpufreq_ondemand.c     |   48 
>+++----------------------
>>  2 files changed, 10 insertions(+), 99 deletions(-)
>> 
>> diff --git a/drivers/cpufreq/cpufreq_conservative.c 
>b/drivers/cpufreq/cpufreq_conservative.c
>> index 7a74d17..6303379 100644
>> --- a/drivers/cpufreq/cpufreq_conservative.c
>> +++ b/drivers/cpufreq/cpufreq_conservative.c
>> @@ -18,7 +18,6 @@
>>  #include <linux/cpu.h>
>>  #include <linux/jiffies.h>
>>  #include <linux/kernel_stat.h>
>> -#include <linux/mutex.h>
>>  #include <linux/hrtimer.h>
>>  #include <linux/tick.h>
>>  #include <linux/ktime.h>
>> @@ -84,19 +83,6 @@ static DEFINE_PER_CPU(struct 
>cpu_dbs_info_s, cpu_dbs_info);
>>  
>>  static unsigned int dbs_enable;	/* number of CPUs using 
>this policy */
>>  
>> -/*
>> - * DEADLOCK ALERT! There is a ordering requirement between 
>cpu_hotplug
>> - * lock and dbs_mutex. cpu_hotplug lock should always be held before
>> - * dbs_mutex. If any function that can potentially take 
>cpu_hotplug lock
>> - * (like __cpufreq_driver_target()) is being called with 
>dbs_mutex taken, then
>> - * cpu_hotplug lock should be taken before that. Note that 
>cpu_hotplug lock
>> - * is recursive for the same process. -Venki
>> - * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the 
>dbs_mutex, because it
>> - * would deadlock with cancel_delayed_work_sync(), which is 
>needed for proper
>> - * raceless workqueue teardown.
>> - */
>> -static DEFINE_MUTEX(dbs_mutex);
>> -
>>  static struct workqueue_struct	*kconservative_wq;
>>  
>>  static struct dbs_tuners {
>> @@ -236,10 +222,7 @@ static ssize_t 
>store_sampling_down_factor(struct cpufreq_policy *unused,
>>  	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
>>  		return -EINVAL;
>>  
>> -	mutex_lock(&dbs_mutex);
>>  	dbs_tuners_ins.sampling_down_factor = input;
>> -	mutex_unlock(&dbs_mutex);
>> -
>>  	return count;
>>  }
>>  
>> @@ -253,10 +236,7 @@ static ssize_t 
>store_sampling_rate(struct cpufreq_policy *unused,
>>  	if (ret != 1)
>>  		return -EINVAL;
>>  
>> -	mutex_lock(&dbs_mutex);
>>  	dbs_tuners_ins.sampling_rate = max(input, 
>minimum_sampling_rate());
>> -	mutex_unlock(&dbs_mutex);
>> -
>>  	return count;
>>  }
>>  
>> @@ -267,16 +247,11 @@ static ssize_t 
>store_up_threshold(struct cpufreq_policy *unused,
>>  	int ret;
>>  	ret = sscanf(buf, "%u", &input);
>>  
>> -	mutex_lock(&dbs_mutex);
>>  	if (ret != 1 || input > 100 ||
>> -			input <= dbs_tuners_ins.down_threshold) {
>> -		mutex_unlock(&dbs_mutex);
>> +			input <= dbs_tuners_ins.down_threshold)
>>  		return -EINVAL;
>> -	}
>>  
>>  	dbs_tuners_ins.up_threshold = input;
>> -	mutex_unlock(&dbs_mutex);
>
>Here, for instance, there might be a problem if down_threshold is
>changed concurrently with a store_up_threshold() call. See 
>that there is
>a test before the modification, and we need the mutex there 
>for it to be
>consistent.
>
>> -
>>  	return count;
>>  }
>>  
>> @@ -287,17 +262,12 @@ static ssize_t 
>store_down_threshold(struct cpufreq_policy *unused,
>>  	int ret;
>>  	ret = sscanf(buf, "%u", &input);
>>  
>> -	mutex_lock(&dbs_mutex);
>>  	/* cannot be lower than 11 otherwise freq will not fall */
>>  	if (ret != 1 || input < 11 || input > 100 ||
>> -			input >= dbs_tuners_ins.up_threshold) {
>> -		mutex_unlock(&dbs_mutex);
>> +			input >= dbs_tuners_ins.up_threshold)
>>  		return -EINVAL;
>> -	}
>>  
>>  	dbs_tuners_ins.down_threshold = input;
>> -	mutex_unlock(&dbs_mutex);
>> -
>>  	return count;
>>  }
>>  
>> @@ -316,11 +286,9 @@ static ssize_t 
>store_ignore_nice_load(struct cpufreq_policy *policy,
>>  	if (input > 1)
>>  		input = 1;
>>  
>> -	mutex_lock(&dbs_mutex);
>> -	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
>> -		mutex_unlock(&dbs_mutex);
>> +	if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */
>>  		return count;
>> -	}
>> +
>>  	dbs_tuners_ins.ignore_nice = input;
>>  
>>  	/* we need to re-evaluate prev_cpu_idle */
>> @@ -332,8 +300,6 @@ static ssize_t 
>store_ignore_nice_load(struct cpufreq_policy *policy,
>>  		if (dbs_tuners_ins.ignore_nice)
>>  			dbs_info->prev_cpu_nice = 
>kstat_cpu(j).cpustat.nice;
>>  	}
>> -	mutex_unlock(&dbs_mutex);
>> -
>>  	return count;
>>  }
>>  
>> @@ -352,10 +318,7 @@ static ssize_t store_freq_step(struct 
>cpufreq_policy *policy,
>>  
>>  	/* no need to test here if freq_step is zero as the 
>user might actually
>>  	 * want this, they would be crazy though :) */
>> -	mutex_lock(&dbs_mutex);
>>  	dbs_tuners_ins.freq_step = input;
>> -	mutex_unlock(&dbs_mutex);
>> -
>>  	return count;
>>  }
>>  
>> @@ -566,13 +529,9 @@ static int cpufreq_governor_dbs(struct 
>cpufreq_policy *policy,
>
>Hrm, this is where we want the mutexes removed, but I fear this is
>creating a race between sysfs_create_group (sysfs file creation) and
>policy initialization...
>
>I'm not convinced this mutex is not needed.
>

I am with Mathieu on this one. We can reduce the use of mutex here. But, it will still be needed. As I mentioned earlier, we need it to protect dbs_tuners getting changed from different CPUs at the same time. We also need it for dbs_enable change in start and stop from different CPUs. Otherwise dbs_enable increment/decrement and test will have races. I was playing with some changes for this. I should have a cleaner patchset later today...

Thanks,
Venki--
To unsubscribe from this list: send the line "unsubscribe kernel-testers" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH 1/2] CPUFREQ: Remove unneeded dbs_mutexes from ondemand and conservative governors
  2009-06-25 14:25     ` Mathieu Desnoyers
  2009-06-25 15:03       ` Pallipadi, Venkatesh
@ 2009-06-25 22:17       ` Thomas Renninger
       [not found]         ` <200906260017.10730.trenn-l3A5Bk7waGM@public.gmane.org>
  1 sibling, 1 reply; 15+ messages in thread
From: Thomas Renninger @ 2009-06-25 22:17 UTC (permalink / raw)
  To: Mathieu Desnoyers
  Cc: kernel-soeCzev1AWYdnm+yROfE0A, cpufreq-u79uwXL29TY76Z2rM5mHXA,
	linux-kernel-u79uwXL29TY76Z2rM5mHXA, mingo-X9Un+BFzKDI,
	rjw-KKrjLPT3xs0, hidave.darkstar-Re5JQEeQqe8AvxtiuMwx3w,
	penberg-bbCR+/B0CizivPeTLB3BmA,
	kernel-testers-u79uwXL29TY76Z2rM5mHXA,
	davej-H+wXaHxf7aLQT0dZR+AlfA, Venkatesh Pallipadi

On Thursday 25 June 2009 04:25:52 pm Mathieu Desnoyers wrote:
> * Thomas Renninger (trenn-l3A5Bk7waGM@public.gmane.org) wrote:
> > Comment from Venkatesh:
> > ...
> > This mutex is just serializing the changes to those variables. I couldn't
> > think of any functionality issues of not having the lock as such.
> >
> > -> rip it out.
> >
> > CC: Venkatesh Pallipadi <venkatesh.pallipadi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> > Signed-off-by: Thomas Renninger <trenn-l3A5Bk7waGM@public.gmane.org>
> > ---
> >  drivers/cpufreq/cpufreq_conservative.c |   61
> > +++----------------------------- drivers/cpufreq/cpufreq_ondemand.c     |
> >   48 +++---------------------- 2 files changed, 10 insertions(+), 99
> > deletions(-)
> >
> > diff --git a/drivers/cpufreq/cpufreq_conservative.c
> > b/drivers/cpufreq/cpufreq_conservative.c index 7a74d17..6303379 100644
> > --- a/drivers/cpufreq/cpufreq_conservative.c
> > +++ b/drivers/cpufreq/cpufreq_conservative.c
> > @@ -18,7 +18,6 @@
> >  #include <linux/cpu.h>
> >  #include <linux/jiffies.h>
> >  #include <linux/kernel_stat.h>
> > -#include <linux/mutex.h>
> >  #include <linux/hrtimer.h>
> >  #include <linux/tick.h>
> >  #include <linux/ktime.h>
> > @@ -84,19 +83,6 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s,
> > cpu_dbs_info);
> >
> >  static unsigned int dbs_enable;	/* number of CPUs using this policy */
> >
> > -/*
> > - * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
> > - * lock and dbs_mutex. cpu_hotplug lock should always be held before
> > - * dbs_mutex. If any function that can potentially take cpu_hotplug lock
> > - * (like __cpufreq_driver_target()) is being called with dbs_mutex
> > taken, then - * cpu_hotplug lock should be taken before that. Note that
> > cpu_hotplug lock - * is recursive for the same process. -Venki
> > - * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex,
> > because it - * would deadlock with cancel_delayed_work_sync(), which is
> > needed for proper - * raceless workqueue teardown.
> > - */
> > -static DEFINE_MUTEX(dbs_mutex);
> > -
> >  static struct workqueue_struct	*kconservative_wq;
> >
> >  static struct dbs_tuners {
> > @@ -236,10 +222,7 @@ static ssize_t store_sampling_down_factor(struct
> > cpufreq_policy *unused, if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR
> > || input < 1) return -EINVAL;
> >
> > -	mutex_lock(&dbs_mutex);
> >  	dbs_tuners_ins.sampling_down_factor = input;
> > -	mutex_unlock(&dbs_mutex);
> > -
> >  	return count;
> >  }
> >
> > @@ -253,10 +236,7 @@ static ssize_t store_sampling_rate(struct
> > cpufreq_policy *unused, if (ret != 1)
> >  		return -EINVAL;
> >
> > -	mutex_lock(&dbs_mutex);
> >  	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
> > -	mutex_unlock(&dbs_mutex);
> > -
> >  	return count;
> >  }
> >
> > @@ -267,16 +247,11 @@ static ssize_t store_up_threshold(struct
> > cpufreq_policy *unused, int ret;
> >  	ret = sscanf(buf, "%u", &input);
> >
> > -	mutex_lock(&dbs_mutex);
> >  	if (ret != 1 || input > 100 ||
> > -			input <= dbs_tuners_ins.down_threshold) {
> > -		mutex_unlock(&dbs_mutex);
> > +			input <= dbs_tuners_ins.down_threshold)
> >  		return -EINVAL;
> > -	}
> >
> >  	dbs_tuners_ins.up_threshold = input;
> > -	mutex_unlock(&dbs_mutex);
>
> Here, for instance, there might be a problem if down_threshold is
> changed concurrently with a store_up_threshold() call. See that there is
> a test before the modification, and we need the mutex there for it to be
> consistent.
Thanks, I was rather quick with the conservative changes..., but
it should still be ok.

It should be assured that if userspace is doing:
echo x > down_threshold
echo y > up_threshold
that the first one will be served/finished first?

If userspace is writing different values for each core to the global 
conservative/ondemand tunables, or you have concurrent userspace tools
trying to configure ondemand/conservative, it's a userspace bug.
It's confusing that ondemand/conservative allows per core reads/writes to
global variables and I hope to be able to provide something to change that in 
some days, maybe weeks.

If you still can think of a possible issue, a userspace scenario would
help.

> > -
> >  	return count;
> >  }
> >
> > @@ -287,17 +262,12 @@ static ssize_t store_down_threshold(struct
> > cpufreq_policy *unused, int ret;
> >  	ret = sscanf(buf, "%u", &input);
> >
> > -	mutex_lock(&dbs_mutex);
> >  	/* cannot be lower than 11 otherwise freq will not fall */
> >  	if (ret != 1 || input < 11 || input > 100 ||
> > -			input >= dbs_tuners_ins.up_threshold) {
> > -		mutex_unlock(&dbs_mutex);
> > +			input >= dbs_tuners_ins.up_threshold)
> >  		return -EINVAL;
> > -	}
> >
> >  	dbs_tuners_ins.down_threshold = input;
> > -	mutex_unlock(&dbs_mutex);
> > -
> >  	return count;
> >  }
> >
> > @@ -316,11 +286,9 @@ static ssize_t store_ignore_nice_load(struct
> > cpufreq_policy *policy, if (input > 1)
> >  		input = 1;
> >
> > -	mutex_lock(&dbs_mutex);
> > -	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
> > -		mutex_unlock(&dbs_mutex);
> > +	if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */
> >  		return count;
> > -	}
> > +
> >  	dbs_tuners_ins.ignore_nice = input;
> >
> >  	/* we need to re-evaluate prev_cpu_idle */
> > @@ -332,8 +300,6 @@ static ssize_t store_ignore_nice_load(struct
> > cpufreq_policy *policy, if (dbs_tuners_ins.ignore_nice)
> >  			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
> >  	}
> > -	mutex_unlock(&dbs_mutex);
> > -
> >  	return count;
> >  }
> >
> > @@ -352,10 +318,7 @@ static ssize_t store_freq_step(struct cpufreq_policy
> > *policy,
> >
> >  	/* no need to test here if freq_step is zero as the user might actually
> >  	 * want this, they would be crazy though :) */
> > -	mutex_lock(&dbs_mutex);
> >  	dbs_tuners_ins.freq_step = input;
> > -	mutex_unlock(&dbs_mutex);
> > -
> >  	return count;
> >  }
> >
> > @@ -566,13 +529,9 @@ static int cpufreq_governor_dbs(struct
> > cpufreq_policy *policy,
>
> Hrm, this is where we want the mutexes removed, but I fear this is
> creating a race between sysfs_create_group (sysfs file creation) and
> policy initialization...
This can be solved by moving this_dbs_info->enable incrementation
after sysfs_create_group.
But yes, I forgot that in my patch, thanks!

> I'm not convinced this mutex is not needed.
I am. Maybe it still takes some more thinking or step by step rework.
Finding an unintrusive, riskless short term solution for .30 is a challenge, 
though.

    Thomas

> Mathieu
>
> >  		if (this_dbs_info->enable) /* Already enabled */
> >  			break;
> >
> > -		mutex_lock(&dbs_mutex);
> > -
> >  		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
> > -		if (rc) {
> > -			mutex_unlock(&dbs_mutex);
> > +		if (rc)
> >  			return rc;
> > -		}
> >
> >  		for_each_cpu(j, policy->cpus) {
> >  			struct cpu_dbs_info_s *j_dbs_info;
> > @@ -612,13 +571,9 @@ static int cpufreq_governor_dbs(struct
> > cpufreq_policy *policy, CPUFREQ_TRANSITION_NOTIFIER);
> >  		}
> >  		dbs_timer_init(this_dbs_info);
> > -
> > -		mutex_unlock(&dbs_mutex);
> > -
> >  		break;
> >
> >  	case CPUFREQ_GOV_STOP:
> > -		mutex_lock(&dbs_mutex);
> >  		dbs_timer_exit(this_dbs_info);
> >  		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
> >  		dbs_enable--;
> > @@ -631,13 +586,9 @@ static int cpufreq_governor_dbs(struct
> > cpufreq_policy *policy, cpufreq_unregister_notifier(
> >  					&dbs_cpufreq_notifier_block,
> >  					CPUFREQ_TRANSITION_NOTIFIER);
> > -
> > -		mutex_unlock(&dbs_mutex);
> > -
> >  		break;
> >
> >  	case CPUFREQ_GOV_LIMITS:
> > -		mutex_lock(&dbs_mutex);
> >  		if (policy->max < this_dbs_info->cur_policy->cur)
> >  			__cpufreq_driver_target(
> >  					this_dbs_info->cur_policy,
> > @@ -646,8 +597,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy
> > *policy, __cpufreq_driver_target(
> >  					this_dbs_info->cur_policy,
> >  					policy->min, CPUFREQ_RELATION_L);
> > -		mutex_unlock(&dbs_mutex);
> > -
> >  		break;
> >  	}
> >  	return 0;
> > diff --git a/drivers/cpufreq/cpufreq_ondemand.c
> > b/drivers/cpufreq/cpufreq_ondemand.c index e741c33..d080a48 100644
> > --- a/drivers/cpufreq/cpufreq_ondemand.c
> > +++ b/drivers/cpufreq/cpufreq_ondemand.c
> > @@ -17,7 +17,6 @@
> >  #include <linux/cpu.h>
> >  #include <linux/jiffies.h>
> >  #include <linux/kernel_stat.h>
> > -#include <linux/mutex.h>
> >  #include <linux/hrtimer.h>
> >  #include <linux/tick.h>
> >  #include <linux/ktime.h>
> > @@ -91,19 +90,6 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s,
> > cpu_dbs_info);
> >
> >  static unsigned int dbs_enable;	/* number of CPUs using this policy */
> >
> > -/*
> > - * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
> > - * lock and dbs_mutex. cpu_hotplug lock should always be held before
> > - * dbs_mutex. If any function that can potentially take cpu_hotplug lock
> > - * (like __cpufreq_driver_target()) is being called with dbs_mutex
> > taken, then - * cpu_hotplug lock should be taken before that. Note that
> > cpu_hotplug lock - * is recursive for the same process. -Venki
> > - * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex,
> > because it - * would deadlock with cancel_delayed_work_sync(), which is
> > needed for proper - * raceless workqueue teardown.
> > - */
> > -static DEFINE_MUTEX(dbs_mutex);
> > -
> >  static struct workqueue_struct	*kondemand_wq;
> >
> >  static struct dbs_tuners {
> > @@ -269,14 +255,10 @@ static ssize_t store_sampling_rate(struct
> > cpufreq_policy *unused, int ret;
> >  	ret = sscanf(buf, "%u", &input);
> >
> > -	mutex_lock(&dbs_mutex);
> > -	if (ret != 1) {
> > -		mutex_unlock(&dbs_mutex);
> > +	if (ret != 1)
> >  		return -EINVAL;
> > -	}
> > -	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
> > -	mutex_unlock(&dbs_mutex);
> >
> > +	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
> >  	return count;
> >  }
> >
> > @@ -287,16 +269,11 @@ static ssize_t store_up_threshold(struct
> > cpufreq_policy *unused, int ret;
> >  	ret = sscanf(buf, "%u", &input);
> >
> > -	mutex_lock(&dbs_mutex);
> >  	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
> > -			input < MIN_FREQUENCY_UP_THRESHOLD) {
> > -		mutex_unlock(&dbs_mutex);
> > +			input < MIN_FREQUENCY_UP_THRESHOLD)
> >  		return -EINVAL;
> > -	}
> >
> >  	dbs_tuners_ins.up_threshold = input;
> > -	mutex_unlock(&dbs_mutex);
> > -
> >  	return count;
> >  }
> >
> > @@ -315,11 +292,9 @@ static ssize_t store_ignore_nice_load(struct
> > cpufreq_policy *policy, if (input > 1)
> >  		input = 1;
> >
> > -	mutex_lock(&dbs_mutex);
> > -	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
> > -		mutex_unlock(&dbs_mutex);
> > +	if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */
> >  		return count;
> > -	}
> > +
> >  	dbs_tuners_ins.ignore_nice = input;
> >
> >  	/* we need to re-evaluate prev_cpu_idle */
> > @@ -332,8 +307,6 @@ static ssize_t store_ignore_nice_load(struct
> > cpufreq_policy *policy, dbs_info->prev_cpu_nice =
> > kstat_cpu(j).cpustat.nice;
> >
> >  	}
> > -	mutex_unlock(&dbs_mutex);
> > -
> >  	return count;
> >  }
> >
> > @@ -350,10 +323,8 @@ static ssize_t store_powersave_bias(struct
> > cpufreq_policy *unused, if (input > 1000)
> >  		input = 1000;
> >
> > -	mutex_lock(&dbs_mutex);
> >  	dbs_tuners_ins.powersave_bias = input;
> >  	ondemand_powersave_bias_init();
> > -	mutex_unlock(&dbs_mutex);
> >
> >  	return count;
> >  }
> > @@ -586,13 +557,11 @@ static int cpufreq_governor_dbs(struct
> > cpufreq_policy *policy, if (this_dbs_info->enable) /* Already enabled */
> >  			break;
> >
> > -		mutex_lock(&dbs_mutex);
> >  		dbs_enable++;
> >
> >  		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
> >  		if (rc) {
> >  			dbs_enable--;
> > -			mutex_unlock(&dbs_mutex);
> >  			return rc;
> >  		}
> >
> > @@ -627,28 +596,21 @@ static int cpufreq_governor_dbs(struct
> > cpufreq_policy *policy, dbs_tuners_ins.sampling_rate = def_sampling_rate;
> >  		}
> >  		dbs_timer_init(this_dbs_info);
> > -
> > -		mutex_unlock(&dbs_mutex);
> >  		break;
> >
> >  	case CPUFREQ_GOV_STOP:
> > -		mutex_lock(&dbs_mutex);
> >  		dbs_timer_exit(this_dbs_info);
> >  		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
> >  		dbs_enable--;
> > -		mutex_unlock(&dbs_mutex);
> > -
> >  		break;
> >
> >  	case CPUFREQ_GOV_LIMITS:
> > -		mutex_lock(&dbs_mutex);
> >  		if (policy->max < this_dbs_info->cur_policy->cur)
> >  			__cpufreq_driver_target(this_dbs_info->cur_policy,
> >  				policy->max, CPUFREQ_RELATION_H);
> >  		else if (policy->min > this_dbs_info->cur_policy->cur)
> >  			__cpufreq_driver_target(this_dbs_info->cur_policy,
> >  				policy->min, CPUFREQ_RELATION_L);
> > -		mutex_unlock(&dbs_mutex);
> >  		break;
> >  	}
> >  	return 0;
> > --
> > 1.6.0.2


^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH 1/2] CPUFREQ: Remove unneeded dbs_mutexes from ondemand and conservative governors
       [not found]         ` <200906260017.10730.trenn-l3A5Bk7waGM@public.gmane.org>
@ 2009-06-25 22:26           ` Thomas Renninger
  0 siblings, 0 replies; 15+ messages in thread
From: Thomas Renninger @ 2009-06-25 22:26 UTC (permalink / raw)
  To: Mathieu Desnoyers
  Cc: kernel-soeCzev1AWYdnm+yROfE0A, cpufreq-u79uwXL29TY76Z2rM5mHXA,
	linux-kernel-u79uwXL29TY76Z2rM5mHXA, mingo-X9Un+BFzKDI,
	rjw-KKrjLPT3xs0, hidave.darkstar-Re5JQEeQqe8AvxtiuMwx3w,
	penberg-bbCR+/B0CizivPeTLB3BmA,
	kernel-testers-u79uwXL29TY76Z2rM5mHXA,
	davej-H+wXaHxf7aLQT0dZR+AlfA, Venkatesh Pallipadi

On Friday 26 June 2009 12:17:09 am Thomas Renninger wrote:
> On Thursday 25 June 2009 04:25:52 pm Mathieu Desnoyers wrote:
> > * Thomas Renninger (trenn-l3A5Bk7waGM@public.gmane.org) wrote:
> > > Comment from Venkatesh:
> > > ...
> > > This mutex is just serializing the changes to those variables. I
> > > couldn't think of any functionality issues of not having the lock as
> > > such.
> > >
> > > -> rip it out.
> > >
> > > CC: Venkatesh Pallipadi <venkatesh.pallipadi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> > > Signed-off-by: Thomas Renninger <trenn-l3A5Bk7waGM@public.gmane.org>
> > > ---
> > >  drivers/cpufreq/cpufreq_conservative.c |   61
> > > +++----------------------------- drivers/cpufreq/cpufreq_ondemand.c    
> > > | 48 +++---------------------- 2 files changed, 10 insertions(+), 99
> > > deletions(-)
> > >
> > > diff --git a/drivers/cpufreq/cpufreq_conservative.c
> > > b/drivers/cpufreq/cpufreq_conservative.c index 7a74d17..6303379 100644
> > > --- a/drivers/cpufreq/cpufreq_conservative.c
> > > +++ b/drivers/cpufreq/cpufreq_conservative.c
> > > @@ -18,7 +18,6 @@
> > >  #include <linux/cpu.h>
> > >  #include <linux/jiffies.h>
> > >  #include <linux/kernel_stat.h>
> > > -#include <linux/mutex.h>
> > >  #include <linux/hrtimer.h>
> > >  #include <linux/tick.h>
> > >  #include <linux/ktime.h>
> > > @@ -84,19 +83,6 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s,
> > > cpu_dbs_info);
> > >
> > >  static unsigned int dbs_enable;	/* number of CPUs using this policy */
> > >
> > > -/*
> > > - * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
> > > - * lock and dbs_mutex. cpu_hotplug lock should always be held before
> > > - * dbs_mutex. If any function that can potentially take cpu_hotplug
> > > lock - * (like __cpufreq_driver_target()) is being called with
> > > dbs_mutex taken, then - * cpu_hotplug lock should be taken before that.
> > > Note that cpu_hotplug lock - * is recursive for the same process.
> > > -Venki - * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the
> > > dbs_mutex, because it - * would deadlock with
> > > cancel_delayed_work_sync(), which is needed for proper - * raceless
> > > workqueue teardown.
> > > - */
> > > -static DEFINE_MUTEX(dbs_mutex);
> > > -
> > >  static struct workqueue_struct	*kconservative_wq;
> > >
> > >  static struct dbs_tuners {
> > > @@ -236,10 +222,7 @@ static ssize_t store_sampling_down_factor(struct
> > > cpufreq_policy *unused, if (ret != 1 || input >
> > > MAX_SAMPLING_DOWN_FACTOR
> > >
> > > || input < 1) return -EINVAL;
> > >
> > > -	mutex_lock(&dbs_mutex);
> > >  	dbs_tuners_ins.sampling_down_factor = input;
> > > -	mutex_unlock(&dbs_mutex);
> > > -
> > >  	return count;
> > >  }
> > >
> > > @@ -253,10 +236,7 @@ static ssize_t store_sampling_rate(struct
> > > cpufreq_policy *unused, if (ret != 1)
> > >  		return -EINVAL;
> > >
> > > -	mutex_lock(&dbs_mutex);
> > >  	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
> > > -	mutex_unlock(&dbs_mutex);
> > > -
> > >  	return count;
> > >  }
> > >
> > > @@ -267,16 +247,11 @@ static ssize_t store_up_threshold(struct
> > > cpufreq_policy *unused, int ret;
> > >  	ret = sscanf(buf, "%u", &input);
> > >
> > > -	mutex_lock(&dbs_mutex);
> > >  	if (ret != 1 || input > 100 ||
> > > -			input <= dbs_tuners_ins.down_threshold) {
> > > -		mutex_unlock(&dbs_mutex);
> > > +			input <= dbs_tuners_ins.down_threshold)
> > >  		return -EINVAL;
> > > -	}
> > >
> > >  	dbs_tuners_ins.up_threshold = input;
> > > -	mutex_unlock(&dbs_mutex);
> >
> > Here, for instance, there might be a problem if down_threshold is
> > changed concurrently with a store_up_threshold() call. See that there is
> > a test before the modification, and we need the mutex there for it to be
> > consistent.
>
> Thanks, I was rather quick with the conservative changes..., but
> it should still be ok.
>
> It should be assured that if userspace is doing:
> echo x > down_threshold
> echo y > up_threshold
> that the first one will be served/finished first?
>
> If userspace is writing different values for each core to the global
> conservative/ondemand tunables, or you have concurrent userspace tools
> trying to configure ondemand/conservative, it's a userspace bug.
> It's confusing that ondemand/conservative allows per core reads/writes to
> global variables and I hope to be able to provide something to change that
> in some days, maybe weeks.
>
> If you still can think of a possible issue, a userspace scenario would
> help.
>
> > > -
> > >  	return count;
> > >  }
> > >
> > > @@ -287,17 +262,12 @@ static ssize_t store_down_threshold(struct
> > > cpufreq_policy *unused, int ret;
> > >  	ret = sscanf(buf, "%u", &input);
> > >
> > > -	mutex_lock(&dbs_mutex);
> > >  	/* cannot be lower than 11 otherwise freq will not fall */
> > >  	if (ret != 1 || input < 11 || input > 100 ||
> > > -			input >= dbs_tuners_ins.up_threshold) {
> > > -		mutex_unlock(&dbs_mutex);
> > > +			input >= dbs_tuners_ins.up_threshold)
> > >  		return -EINVAL;
> > > -	}
> > >
> > >  	dbs_tuners_ins.down_threshold = input;
> > > -	mutex_unlock(&dbs_mutex);
> > > -
> > >  	return count;
> > >  }
> > >
> > > @@ -316,11 +286,9 @@ static ssize_t store_ignore_nice_load(struct
> > > cpufreq_policy *policy, if (input > 1)
> > >  		input = 1;
> > >
> > > -	mutex_lock(&dbs_mutex);
> > > -	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
> > > -		mutex_unlock(&dbs_mutex);
> > > +	if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */
> > >  		return count;
> > > -	}
> > > +
> > >  	dbs_tuners_ins.ignore_nice = input;
> > >
> > >  	/* we need to re-evaluate prev_cpu_idle */
> > > @@ -332,8 +300,6 @@ static ssize_t store_ignore_nice_load(struct
> > > cpufreq_policy *policy, if (dbs_tuners_ins.ignore_nice)
> > >  			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
> > >  	}
> > > -	mutex_unlock(&dbs_mutex);
> > > -
> > >  	return count;
> > >  }
> > >
> > > @@ -352,10 +318,7 @@ static ssize_t store_freq_step(struct
> > > cpufreq_policy *policy,
> > >
> > >  	/* no need to test here if freq_step is zero as the user might
> > > actually * want this, they would be crazy though :) */
> > > -	mutex_lock(&dbs_mutex);
> > >  	dbs_tuners_ins.freq_step = input;
> > > -	mutex_unlock(&dbs_mutex);
> > > -
> > >  	return count;
> > >  }
> > >
> > > @@ -566,13 +529,9 @@ static int cpufreq_governor_dbs(struct
> > > cpufreq_policy *policy,
> >
> > Hrm, this is where we want the mutexes removed, but I fear this is
> > creating a race between sysfs_create_group (sysfs file creation) and
> > policy initialization...
>
> This can be solved by moving this_dbs_info->enable incrementation
> after sysfs_create_group.
Forget this sentence, don't think about it, it's crap.
I better go to bed now...

       Thomas

> But yes, I forgot that in my patch, thanks!
>
> > I'm not convinced this mutex is not needed.
>
> I am. Maybe it still takes some more thinking or step by step rework.
> Finding an unintrusive, riskless short term solution for .30 is a
> challenge, though.
>
>     Thomas
>
> > Mathieu
> >
> > >  		if (this_dbs_info->enable) /* Already enabled */
> > >  			break;
> > >
> > > -		mutex_lock(&dbs_mutex);
> > > -
> > >  		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
> > > -		if (rc) {
> > > -			mutex_unlock(&dbs_mutex);
> > > +		if (rc)
> > >  			return rc;
> > > -		}
> > >
> > >  		for_each_cpu(j, policy->cpus) {
> > >  			struct cpu_dbs_info_s *j_dbs_info;
> > > @@ -612,13 +571,9 @@ static int cpufreq_governor_dbs(struct
> > > cpufreq_policy *policy, CPUFREQ_TRANSITION_NOTIFIER);
> > >  		}
> > >  		dbs_timer_init(this_dbs_info);
> > > -
> > > -		mutex_unlock(&dbs_mutex);
> > > -
> > >  		break;
> > >
> > >  	case CPUFREQ_GOV_STOP:
> > > -		mutex_lock(&dbs_mutex);
> > >  		dbs_timer_exit(this_dbs_info);
> > >  		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
> > >  		dbs_enable--;
> > > @@ -631,13 +586,9 @@ static int cpufreq_governor_dbs(struct
> > > cpufreq_policy *policy, cpufreq_unregister_notifier(
> > >  					&dbs_cpufreq_notifier_block,
> > >  					CPUFREQ_TRANSITION_NOTIFIER);
> > > -
> > > -		mutex_unlock(&dbs_mutex);
> > > -
> > >  		break;
> > >
> > >  	case CPUFREQ_GOV_LIMITS:
> > > -		mutex_lock(&dbs_mutex);
> > >  		if (policy->max < this_dbs_info->cur_policy->cur)
> > >  			__cpufreq_driver_target(
> > >  					this_dbs_info->cur_policy,
> > > @@ -646,8 +597,6 @@ static int cpufreq_governor_dbs(struct
> > > cpufreq_policy *policy, __cpufreq_driver_target(
> > >  					this_dbs_info->cur_policy,
> > >  					policy->min, CPUFREQ_RELATION_L);
> > > -		mutex_unlock(&dbs_mutex);
> > > -
> > >  		break;
> > >  	}
> > >  	return 0;
> > > diff --git a/drivers/cpufreq/cpufreq_ondemand.c
> > > b/drivers/cpufreq/cpufreq_ondemand.c index e741c33..d080a48 100644
> > > --- a/drivers/cpufreq/cpufreq_ondemand.c
> > > +++ b/drivers/cpufreq/cpufreq_ondemand.c
> > > @@ -17,7 +17,6 @@
> > >  #include <linux/cpu.h>
> > >  #include <linux/jiffies.h>
> > >  #include <linux/kernel_stat.h>
> > > -#include <linux/mutex.h>
> > >  #include <linux/hrtimer.h>
> > >  #include <linux/tick.h>
> > >  #include <linux/ktime.h>
> > > @@ -91,19 +90,6 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s,
> > > cpu_dbs_info);
> > >
> > >  static unsigned int dbs_enable;	/* number of CPUs using this policy */
> > >
> > > -/*
> > > - * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
> > > - * lock and dbs_mutex. cpu_hotplug lock should always be held before
> > > - * dbs_mutex. If any function that can potentially take cpu_hotplug
> > > lock - * (like __cpufreq_driver_target()) is being called with
> > > dbs_mutex taken, then - * cpu_hotplug lock should be taken before that.
> > > Note that cpu_hotplug lock - * is recursive for the same process.
> > > -Venki - * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the
> > > dbs_mutex, because it - * would deadlock with
> > > cancel_delayed_work_sync(), which is needed for proper - * raceless
> > > workqueue teardown.
> > > - */
> > > -static DEFINE_MUTEX(dbs_mutex);
> > > -
> > >  static struct workqueue_struct	*kondemand_wq;
> > >
> > >  static struct dbs_tuners {
> > > @@ -269,14 +255,10 @@ static ssize_t store_sampling_rate(struct
> > > cpufreq_policy *unused, int ret;
> > >  	ret = sscanf(buf, "%u", &input);
> > >
> > > -	mutex_lock(&dbs_mutex);
> > > -	if (ret != 1) {
> > > -		mutex_unlock(&dbs_mutex);
> > > +	if (ret != 1)
> > >  		return -EINVAL;
> > > -	}
> > > -	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
> > > -	mutex_unlock(&dbs_mutex);
> > >
> > > +	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
> > >  	return count;
> > >  }
> > >
> > > @@ -287,16 +269,11 @@ static ssize_t store_up_threshold(struct
> > > cpufreq_policy *unused, int ret;
> > >  	ret = sscanf(buf, "%u", &input);
> > >
> > > -	mutex_lock(&dbs_mutex);
> > >  	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
> > > -			input < MIN_FREQUENCY_UP_THRESHOLD) {
> > > -		mutex_unlock(&dbs_mutex);
> > > +			input < MIN_FREQUENCY_UP_THRESHOLD)
> > >  		return -EINVAL;
> > > -	}
> > >
> > >  	dbs_tuners_ins.up_threshold = input;
> > > -	mutex_unlock(&dbs_mutex);
> > > -
> > >  	return count;
> > >  }
> > >
> > > @@ -315,11 +292,9 @@ static ssize_t store_ignore_nice_load(struct
> > > cpufreq_policy *policy, if (input > 1)
> > >  		input = 1;
> > >
> > > -	mutex_lock(&dbs_mutex);
> > > -	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
> > > -		mutex_unlock(&dbs_mutex);
> > > +	if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */
> > >  		return count;
> > > -	}
> > > +
> > >  	dbs_tuners_ins.ignore_nice = input;
> > >
> > >  	/* we need to re-evaluate prev_cpu_idle */
> > > @@ -332,8 +307,6 @@ static ssize_t store_ignore_nice_load(struct
> > > cpufreq_policy *policy, dbs_info->prev_cpu_nice =
> > > kstat_cpu(j).cpustat.nice;
> > >
> > >  	}
> > > -	mutex_unlock(&dbs_mutex);
> > > -
> > >  	return count;
> > >  }
> > >
> > > @@ -350,10 +323,8 @@ static ssize_t store_powersave_bias(struct
> > > cpufreq_policy *unused, if (input > 1000)
> > >  		input = 1000;
> > >
> > > -	mutex_lock(&dbs_mutex);
> > >  	dbs_tuners_ins.powersave_bias = input;
> > >  	ondemand_powersave_bias_init();
> > > -	mutex_unlock(&dbs_mutex);
> > >
> > >  	return count;
> > >  }
> > > @@ -586,13 +557,11 @@ static int cpufreq_governor_dbs(struct
> > > cpufreq_policy *policy, if (this_dbs_info->enable) /* Already enabled
> > > */ break;
> > >
> > > -		mutex_lock(&dbs_mutex);
> > >  		dbs_enable++;
> > >
> > >  		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
> > >  		if (rc) {
> > >  			dbs_enable--;
> > > -			mutex_unlock(&dbs_mutex);
> > >  			return rc;
> > >  		}
> > >
> > > @@ -627,28 +596,21 @@ static int cpufreq_governor_dbs(struct
> > > cpufreq_policy *policy, dbs_tuners_ins.sampling_rate =
> > > def_sampling_rate; }
> > >  		dbs_timer_init(this_dbs_info);
> > > -
> > > -		mutex_unlock(&dbs_mutex);
> > >  		break;
> > >
> > >  	case CPUFREQ_GOV_STOP:
> > > -		mutex_lock(&dbs_mutex);
> > >  		dbs_timer_exit(this_dbs_info);
> > >  		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
> > >  		dbs_enable--;
> > > -		mutex_unlock(&dbs_mutex);
> > > -
> > >  		break;
> > >
> > >  	case CPUFREQ_GOV_LIMITS:
> > > -		mutex_lock(&dbs_mutex);
> > >  		if (policy->max < this_dbs_info->cur_policy->cur)
> > >  			__cpufreq_driver_target(this_dbs_info->cur_policy,
> > >  				policy->max, CPUFREQ_RELATION_H);
> > >  		else if (policy->min > this_dbs_info->cur_policy->cur)
> > >  			__cpufreq_driver_target(this_dbs_info->cur_policy,
> > >  				policy->min, CPUFREQ_RELATION_L);
> > > -		mutex_unlock(&dbs_mutex);
> > >  		break;
> > >  	}
> > >  	return 0;
> > > --
> > > 1.6.0.2


^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH 1/2] CPUFREQ: Remove unneeded dbs_mutexes from ondemand and conservative governors
       [not found]   ` <1245938485-12663-2-git-send-email-trenn-l3A5Bk7waGM@public.gmane.org>
  2009-06-25 14:25     ` Mathieu Desnoyers
@ 2009-06-30  6:33     ` Pavel Machek
       [not found]       ` <20090630063339.GF1351-+ZI9xUNit7I@public.gmane.org>
  2009-06-30 22:58     ` [stable] " Greg KH
  2 siblings, 1 reply; 15+ messages in thread
From: Pavel Machek @ 2009-06-30  6:33 UTC (permalink / raw)
  To: Thomas Renninger
  Cc: kernel-soeCzev1AWYdnm+yROfE0A, cpufreq-u79uwXL29TY76Z2rM5mHXA,
	linux-kernel-u79uwXL29TY76Z2rM5mHXA, mingo-X9Un+BFzKDI,
	rjw-KKrjLPT3xs0, hidave.darkstar-Re5JQEeQqe8AvxtiuMwx3w,
	penberg-bbCR+/B0CizivPeTLB3BmA,
	kernel-testers-u79uwXL29TY76Z2rM5mHXA,
	davej-H+wXaHxf7aLQT0dZR+AlfA,
	mathieu.desnoyers-scC8bbJcJLCw5LPnMra/2Q, Venkatesh Pallipadi

On Thu 2009-06-25 16:01:24, Thomas Renninger wrote:
> Comment from Venkatesh:
> ...
> This mutex is just serializing the changes to those variables. I could't
> think of any functionality issues of not having the lock as such.
> 
> -> rip it out.
> 
> CC: Venkatesh Pallipadi <venkatesh.pallipadi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> Signed-off-by: Thomas Renninger <trenn-l3A5Bk7waGM@public.gmane.org>

>  static struct dbs_tuners {
> @@ -236,10 +222,7 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
>  	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
>  		return -EINVAL;
>  
> -	mutex_lock(&dbs_mutex);
>  	dbs_tuners_ins.sampling_down_factor = input;
> -	mutex_unlock(&dbs_mutex);
> -

You'd need to make s_down_factor atomic_t for this to work....
								Pavel
-- 
(english) http://www.livejournal.com/~pavelmachek
(cesky, pictures) http://atrey.karlin.mff.cuni.cz/~pavel/picture/horses/blog.html

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [stable] [PATCH 1/2] CPUFREQ: Remove unneeded dbs_mutexes from ondemand and conservative governors
       [not found]   ` <1245938485-12663-2-git-send-email-trenn-l3A5Bk7waGM@public.gmane.org>
  2009-06-25 14:25     ` Mathieu Desnoyers
  2009-06-30  6:33     ` Pavel Machek
@ 2009-06-30 22:58     ` Greg KH
       [not found]       ` <20090630225813.GB2634-U8xfFu+wG4EAvxtiuMwx3w@public.gmane.org>
  2 siblings, 1 reply; 15+ messages in thread
From: Greg KH @ 2009-06-30 22:58 UTC (permalink / raw)
  To: Thomas Renninger
  Cc: stable-DgEjT+Ai2ygdnm+yROfE0A,
	mathieu.desnoyers-scC8bbJcJLCw5LPnMra/2Q,
	hidave.darkstar-Re5JQEeQqe8AvxtiuMwx3w,
	linux-kernel-u79uwXL29TY76Z2rM5mHXA,
	cpufreq-u79uwXL29TY76Z2rM5mHXA, rjw-KKrjLPT3xs0,
	penberg-bbCR+/B0CizivPeTLB3BmA, Venkatesh Pallipadi,
	davej-H+wXaHxf7aLQT0dZR+AlfA, mingo-X9Un+BFzKDI,
	kernel-testers-u79uwXL29TY76Z2rM5mHXA

I don't see the patch below in Linus's tree.  If it's there, what is the
git commit id?

thanks,

greg k-h

On Thu, Jun 25, 2009 at 04:01:24PM +0200, Thomas Renninger wrote:
> Comment from Venkatesh:
> ...
> This mutex is just serializing the changes to those variables. I could't
> think of any functionality issues of not having the lock as such.
> 
> -> rip it out.
> 
> CC: Venkatesh Pallipadi <venkatesh.pallipadi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> Signed-off-by: Thomas Renninger <trenn-l3A5Bk7waGM@public.gmane.org>
> ---
>  drivers/cpufreq/cpufreq_conservative.c |   61 +++-----------------------------
>  drivers/cpufreq/cpufreq_ondemand.c     |   48 +++----------------------
>  2 files changed, 10 insertions(+), 99 deletions(-)
> 
> diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
> index 7a74d17..6303379 100644
> --- a/drivers/cpufreq/cpufreq_conservative.c
> +++ b/drivers/cpufreq/cpufreq_conservative.c
> @@ -18,7 +18,6 @@
>  #include <linux/cpu.h>
>  #include <linux/jiffies.h>
>  #include <linux/kernel_stat.h>
> -#include <linux/mutex.h>
>  #include <linux/hrtimer.h>
>  #include <linux/tick.h>
>  #include <linux/ktime.h>
> @@ -84,19 +83,6 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
>  
>  static unsigned int dbs_enable;	/* number of CPUs using this policy */
>  
> -/*
> - * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
> - * lock and dbs_mutex. cpu_hotplug lock should always be held before
> - * dbs_mutex. If any function that can potentially take cpu_hotplug lock
> - * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
> - * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
> - * is recursive for the same process. -Venki
> - * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
> - * would deadlock with cancel_delayed_work_sync(), which is needed for proper
> - * raceless workqueue teardown.
> - */
> -static DEFINE_MUTEX(dbs_mutex);
> -
>  static struct workqueue_struct	*kconservative_wq;
>  
>  static struct dbs_tuners {
> @@ -236,10 +222,7 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
>  	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
>  		return -EINVAL;
>  
> -	mutex_lock(&dbs_mutex);
>  	dbs_tuners_ins.sampling_down_factor = input;
> -	mutex_unlock(&dbs_mutex);
> -
>  	return count;
>  }
>  
> @@ -253,10 +236,7 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
>  	if (ret != 1)
>  		return -EINVAL;
>  
> -	mutex_lock(&dbs_mutex);
>  	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
> -	mutex_unlock(&dbs_mutex);
> -
>  	return count;
>  }
>  
> @@ -267,16 +247,11 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
>  	int ret;
>  	ret = sscanf(buf, "%u", &input);
>  
> -	mutex_lock(&dbs_mutex);
>  	if (ret != 1 || input > 100 ||
> -			input <= dbs_tuners_ins.down_threshold) {
> -		mutex_unlock(&dbs_mutex);
> +			input <= dbs_tuners_ins.down_threshold)
>  		return -EINVAL;
> -	}
>  
>  	dbs_tuners_ins.up_threshold = input;
> -	mutex_unlock(&dbs_mutex);
> -
>  	return count;
>  }
>  
> @@ -287,17 +262,12 @@ static ssize_t store_down_threshold(struct cpufreq_policy *unused,
>  	int ret;
>  	ret = sscanf(buf, "%u", &input);
>  
> -	mutex_lock(&dbs_mutex);
>  	/* cannot be lower than 11 otherwise freq will not fall */
>  	if (ret != 1 || input < 11 || input > 100 ||
> -			input >= dbs_tuners_ins.up_threshold) {
> -		mutex_unlock(&dbs_mutex);
> +			input >= dbs_tuners_ins.up_threshold)
>  		return -EINVAL;
> -	}
>  
>  	dbs_tuners_ins.down_threshold = input;
> -	mutex_unlock(&dbs_mutex);
> -
>  	return count;
>  }
>  
> @@ -316,11 +286,9 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
>  	if (input > 1)
>  		input = 1;
>  
> -	mutex_lock(&dbs_mutex);
> -	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
> -		mutex_unlock(&dbs_mutex);
> +	if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */
>  		return count;
> -	}
> +
>  	dbs_tuners_ins.ignore_nice = input;
>  
>  	/* we need to re-evaluate prev_cpu_idle */
> @@ -332,8 +300,6 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
>  		if (dbs_tuners_ins.ignore_nice)
>  			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
>  	}
> -	mutex_unlock(&dbs_mutex);
> -
>  	return count;
>  }
>  
> @@ -352,10 +318,7 @@ static ssize_t store_freq_step(struct cpufreq_policy *policy,
>  
>  	/* no need to test here if freq_step is zero as the user might actually
>  	 * want this, they would be crazy though :) */
> -	mutex_lock(&dbs_mutex);
>  	dbs_tuners_ins.freq_step = input;
> -	mutex_unlock(&dbs_mutex);
> -
>  	return count;
>  }
>  
> @@ -566,13 +529,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
>  		if (this_dbs_info->enable) /* Already enabled */
>  			break;
>  
> -		mutex_lock(&dbs_mutex);
> -
>  		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
> -		if (rc) {
> -			mutex_unlock(&dbs_mutex);
> +		if (rc)
>  			return rc;
> -		}
>  
>  		for_each_cpu(j, policy->cpus) {
>  			struct cpu_dbs_info_s *j_dbs_info;
> @@ -612,13 +571,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
>  					CPUFREQ_TRANSITION_NOTIFIER);
>  		}
>  		dbs_timer_init(this_dbs_info);
> -
> -		mutex_unlock(&dbs_mutex);
> -
>  		break;
>  
>  	case CPUFREQ_GOV_STOP:
> -		mutex_lock(&dbs_mutex);
>  		dbs_timer_exit(this_dbs_info);
>  		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
>  		dbs_enable--;
> @@ -631,13 +586,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
>  			cpufreq_unregister_notifier(
>  					&dbs_cpufreq_notifier_block,
>  					CPUFREQ_TRANSITION_NOTIFIER);
> -
> -		mutex_unlock(&dbs_mutex);
> -
>  		break;
>  
>  	case CPUFREQ_GOV_LIMITS:
> -		mutex_lock(&dbs_mutex);
>  		if (policy->max < this_dbs_info->cur_policy->cur)
>  			__cpufreq_driver_target(
>  					this_dbs_info->cur_policy,
> @@ -646,8 +597,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
>  			__cpufreq_driver_target(
>  					this_dbs_info->cur_policy,
>  					policy->min, CPUFREQ_RELATION_L);
> -		mutex_unlock(&dbs_mutex);
> -
>  		break;
>  	}
>  	return 0;
> diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
> index e741c33..d080a48 100644
> --- a/drivers/cpufreq/cpufreq_ondemand.c
> +++ b/drivers/cpufreq/cpufreq_ondemand.c
> @@ -17,7 +17,6 @@
>  #include <linux/cpu.h>
>  #include <linux/jiffies.h>
>  #include <linux/kernel_stat.h>
> -#include <linux/mutex.h>
>  #include <linux/hrtimer.h>
>  #include <linux/tick.h>
>  #include <linux/ktime.h>
> @@ -91,19 +90,6 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
>  
>  static unsigned int dbs_enable;	/* number of CPUs using this policy */
>  
> -/*
> - * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
> - * lock and dbs_mutex. cpu_hotplug lock should always be held before
> - * dbs_mutex. If any function that can potentially take cpu_hotplug lock
> - * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
> - * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
> - * is recursive for the same process. -Venki
> - * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
> - * would deadlock with cancel_delayed_work_sync(), which is needed for proper
> - * raceless workqueue teardown.
> - */
> -static DEFINE_MUTEX(dbs_mutex);
> -
>  static struct workqueue_struct	*kondemand_wq;
>  
>  static struct dbs_tuners {
> @@ -269,14 +255,10 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
>  	int ret;
>  	ret = sscanf(buf, "%u", &input);
>  
> -	mutex_lock(&dbs_mutex);
> -	if (ret != 1) {
> -		mutex_unlock(&dbs_mutex);
> +	if (ret != 1)
>  		return -EINVAL;
> -	}
> -	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
> -	mutex_unlock(&dbs_mutex);
>  
> +	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
>  	return count;
>  }
>  
> @@ -287,16 +269,11 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
>  	int ret;
>  	ret = sscanf(buf, "%u", &input);
>  
> -	mutex_lock(&dbs_mutex);
>  	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
> -			input < MIN_FREQUENCY_UP_THRESHOLD) {
> -		mutex_unlock(&dbs_mutex);
> +			input < MIN_FREQUENCY_UP_THRESHOLD)
>  		return -EINVAL;
> -	}
>  
>  	dbs_tuners_ins.up_threshold = input;
> -	mutex_unlock(&dbs_mutex);
> -
>  	return count;
>  }
>  
> @@ -315,11 +292,9 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
>  	if (input > 1)
>  		input = 1;
>  
> -	mutex_lock(&dbs_mutex);
> -	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
> -		mutex_unlock(&dbs_mutex);
> +	if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */
>  		return count;
> -	}
> +
>  	dbs_tuners_ins.ignore_nice = input;
>  
>  	/* we need to re-evaluate prev_cpu_idle */
> @@ -332,8 +307,6 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
>  			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
>  
>  	}
> -	mutex_unlock(&dbs_mutex);
> -
>  	return count;
>  }
>  
> @@ -350,10 +323,8 @@ static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
>  	if (input > 1000)
>  		input = 1000;
>  
> -	mutex_lock(&dbs_mutex);
>  	dbs_tuners_ins.powersave_bias = input;
>  	ondemand_powersave_bias_init();
> -	mutex_unlock(&dbs_mutex);
>  
>  	return count;
>  }
> @@ -586,13 +557,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
>  		if (this_dbs_info->enable) /* Already enabled */
>  			break;
>  
> -		mutex_lock(&dbs_mutex);
>  		dbs_enable++;
>  
>  		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
>  		if (rc) {
>  			dbs_enable--;
> -			mutex_unlock(&dbs_mutex);
>  			return rc;
>  		}
>  
> @@ -627,28 +596,21 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
>  			dbs_tuners_ins.sampling_rate = def_sampling_rate;
>  		}
>  		dbs_timer_init(this_dbs_info);
> -
> -		mutex_unlock(&dbs_mutex);
>  		break;
>  
>  	case CPUFREQ_GOV_STOP:
> -		mutex_lock(&dbs_mutex);
>  		dbs_timer_exit(this_dbs_info);
>  		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
>  		dbs_enable--;
> -		mutex_unlock(&dbs_mutex);
> -
>  		break;
>  
>  	case CPUFREQ_GOV_LIMITS:
> -		mutex_lock(&dbs_mutex);
>  		if (policy->max < this_dbs_info->cur_policy->cur)
>  			__cpufreq_driver_target(this_dbs_info->cur_policy,
>  				policy->max, CPUFREQ_RELATION_H);
>  		else if (policy->min > this_dbs_info->cur_policy->cur)
>  			__cpufreq_driver_target(this_dbs_info->cur_policy,
>  				policy->min, CPUFREQ_RELATION_L);
> -		mutex_unlock(&dbs_mutex);
>  		break;
>  	}
>  	return 0;
> -- 
> 1.6.0.2
> 
> --
> To unsubscribe from this list: send the line "unsubscribe cpufreq" in
> the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
> 
> _______________________________________________
> stable mailing list
> stable-CPWUtch7KCBzeIdxy0IIJw@public.gmane.org
> http://linux.kernel.org/mailman/listinfo/stable

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [stable] [PATCH 1/2] CPUFREQ: Remove unneeded dbs_mutexes from ondemand and conservative governors
       [not found]       ` <20090630225813.GB2634-U8xfFu+wG4EAvxtiuMwx3w@public.gmane.org>
@ 2009-06-30 23:14         ` Mathieu Desnoyers
  2009-06-30 23:39           ` Greg KH
  0 siblings, 1 reply; 15+ messages in thread
From: Mathieu Desnoyers @ 2009-06-30 23:14 UTC (permalink / raw)
  To: Greg KH
  Cc: Thomas Renninger, stable-DgEjT+Ai2ygdnm+yROfE0A,
	hidave.darkstar-Re5JQEeQqe8AvxtiuMwx3w,
	linux-kernel-u79uwXL29TY76Z2rM5mHXA,
	cpufreq-u79uwXL29TY76Z2rM5mHXA, rjw-KKrjLPT3xs0,
	penberg-bbCR+/B0CizivPeTLB3BmA, Venkatesh Pallipadi,
	davej-H+wXaHxf7aLQT0dZR+AlfA, mingo-X9Un+BFzKDI,
	kernel-testers-u79uwXL29TY76Z2rM5mHXA

* Greg KH (greg-U8xfFu+wG4EAvxtiuMwx3w@public.gmane.org) wrote:
> I don't see the patch below in Linus's tree.  If it's there, what is the
> git commit id?
> 

As I pointed out in an earlier reply, this patch is bogus and adds racy
data structure updates. It should not be merged.

Venkatesh is working on a proper fix.

Mathieu

> thanks,
> 
> greg k-h
> 
> On Thu, Jun 25, 2009 at 04:01:24PM +0200, Thomas Renninger wrote:
> > Comment from Venkatesh:
> > ...
> > This mutex is just serializing the changes to those variables. I could't
> > think of any functionality issues of not having the lock as such.
> > 
> > -> rip it out.
> > 
> > CC: Venkatesh Pallipadi <venkatesh.pallipadi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> > Signed-off-by: Thomas Renninger <trenn-l3A5Bk7waGM@public.gmane.org>
> > ---
> >  drivers/cpufreq/cpufreq_conservative.c |   61 +++-----------------------------
> >  drivers/cpufreq/cpufreq_ondemand.c     |   48 +++----------------------
> >  2 files changed, 10 insertions(+), 99 deletions(-)
> > 
> > diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
> > index 7a74d17..6303379 100644
> > --- a/drivers/cpufreq/cpufreq_conservative.c
> > +++ b/drivers/cpufreq/cpufreq_conservative.c
> > @@ -18,7 +18,6 @@
> >  #include <linux/cpu.h>
> >  #include <linux/jiffies.h>
> >  #include <linux/kernel_stat.h>
> > -#include <linux/mutex.h>
> >  #include <linux/hrtimer.h>
> >  #include <linux/tick.h>
> >  #include <linux/ktime.h>
> > @@ -84,19 +83,6 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
> >  
> >  static unsigned int dbs_enable;	/* number of CPUs using this policy */
> >  
> > -/*
> > - * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
> > - * lock and dbs_mutex. cpu_hotplug lock should always be held before
> > - * dbs_mutex. If any function that can potentially take cpu_hotplug lock
> > - * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
> > - * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
> > - * is recursive for the same process. -Venki
> > - * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
> > - * would deadlock with cancel_delayed_work_sync(), which is needed for proper
> > - * raceless workqueue teardown.
> > - */
> > -static DEFINE_MUTEX(dbs_mutex);
> > -
> >  static struct workqueue_struct	*kconservative_wq;
> >  
> >  static struct dbs_tuners {
> > @@ -236,10 +222,7 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
> >  	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
> >  		return -EINVAL;
> >  
> > -	mutex_lock(&dbs_mutex);
> >  	dbs_tuners_ins.sampling_down_factor = input;
> > -	mutex_unlock(&dbs_mutex);
> > -
> >  	return count;
> >  }
> >  
> > @@ -253,10 +236,7 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
> >  	if (ret != 1)
> >  		return -EINVAL;
> >  
> > -	mutex_lock(&dbs_mutex);
> >  	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
> > -	mutex_unlock(&dbs_mutex);
> > -
> >  	return count;
> >  }
> >  
> > @@ -267,16 +247,11 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
> >  	int ret;
> >  	ret = sscanf(buf, "%u", &input);
> >  
> > -	mutex_lock(&dbs_mutex);
> >  	if (ret != 1 || input > 100 ||
> > -			input <= dbs_tuners_ins.down_threshold) {
> > -		mutex_unlock(&dbs_mutex);
> > +			input <= dbs_tuners_ins.down_threshold)
> >  		return -EINVAL;
> > -	}
> >  
> >  	dbs_tuners_ins.up_threshold = input;
> > -	mutex_unlock(&dbs_mutex);
> > -
> >  	return count;
> >  }
> >  
> > @@ -287,17 +262,12 @@ static ssize_t store_down_threshold(struct cpufreq_policy *unused,
> >  	int ret;
> >  	ret = sscanf(buf, "%u", &input);
> >  
> > -	mutex_lock(&dbs_mutex);
> >  	/* cannot be lower than 11 otherwise freq will not fall */
> >  	if (ret != 1 || input < 11 || input > 100 ||
> > -			input >= dbs_tuners_ins.up_threshold) {
> > -		mutex_unlock(&dbs_mutex);
> > +			input >= dbs_tuners_ins.up_threshold)
> >  		return -EINVAL;
> > -	}
> >  
> >  	dbs_tuners_ins.down_threshold = input;
> > -	mutex_unlock(&dbs_mutex);
> > -
> >  	return count;
> >  }
> >  
> > @@ -316,11 +286,9 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
> >  	if (input > 1)
> >  		input = 1;
> >  
> > -	mutex_lock(&dbs_mutex);
> > -	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
> > -		mutex_unlock(&dbs_mutex);
> > +	if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */
> >  		return count;
> > -	}
> > +
> >  	dbs_tuners_ins.ignore_nice = input;
> >  
> >  	/* we need to re-evaluate prev_cpu_idle */
> > @@ -332,8 +300,6 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
> >  		if (dbs_tuners_ins.ignore_nice)
> >  			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
> >  	}
> > -	mutex_unlock(&dbs_mutex);
> > -
> >  	return count;
> >  }
> >  
> > @@ -352,10 +318,7 @@ static ssize_t store_freq_step(struct cpufreq_policy *policy,
> >  
> >  	/* no need to test here if freq_step is zero as the user might actually
> >  	 * want this, they would be crazy though :) */
> > -	mutex_lock(&dbs_mutex);
> >  	dbs_tuners_ins.freq_step = input;
> > -	mutex_unlock(&dbs_mutex);
> > -
> >  	return count;
> >  }
> >  
> > @@ -566,13 +529,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
> >  		if (this_dbs_info->enable) /* Already enabled */
> >  			break;
> >  
> > -		mutex_lock(&dbs_mutex);
> > -
> >  		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
> > -		if (rc) {
> > -			mutex_unlock(&dbs_mutex);
> > +		if (rc)
> >  			return rc;
> > -		}
> >  
> >  		for_each_cpu(j, policy->cpus) {
> >  			struct cpu_dbs_info_s *j_dbs_info;
> > @@ -612,13 +571,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
> >  					CPUFREQ_TRANSITION_NOTIFIER);
> >  		}
> >  		dbs_timer_init(this_dbs_info);
> > -
> > -		mutex_unlock(&dbs_mutex);
> > -
> >  		break;
> >  
> >  	case CPUFREQ_GOV_STOP:
> > -		mutex_lock(&dbs_mutex);
> >  		dbs_timer_exit(this_dbs_info);
> >  		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
> >  		dbs_enable--;
> > @@ -631,13 +586,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
> >  			cpufreq_unregister_notifier(
> >  					&dbs_cpufreq_notifier_block,
> >  					CPUFREQ_TRANSITION_NOTIFIER);
> > -
> > -		mutex_unlock(&dbs_mutex);
> > -
> >  		break;
> >  
> >  	case CPUFREQ_GOV_LIMITS:
> > -		mutex_lock(&dbs_mutex);
> >  		if (policy->max < this_dbs_info->cur_policy->cur)
> >  			__cpufreq_driver_target(
> >  					this_dbs_info->cur_policy,
> > @@ -646,8 +597,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
> >  			__cpufreq_driver_target(
> >  					this_dbs_info->cur_policy,
> >  					policy->min, CPUFREQ_RELATION_L);
> > -		mutex_unlock(&dbs_mutex);
> > -
> >  		break;
> >  	}
> >  	return 0;
> > diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
> > index e741c33..d080a48 100644
> > --- a/drivers/cpufreq/cpufreq_ondemand.c
> > +++ b/drivers/cpufreq/cpufreq_ondemand.c
> > @@ -17,7 +17,6 @@
> >  #include <linux/cpu.h>
> >  #include <linux/jiffies.h>
> >  #include <linux/kernel_stat.h>
> > -#include <linux/mutex.h>
> >  #include <linux/hrtimer.h>
> >  #include <linux/tick.h>
> >  #include <linux/ktime.h>
> > @@ -91,19 +90,6 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
> >  
> >  static unsigned int dbs_enable;	/* number of CPUs using this policy */
> >  
> > -/*
> > - * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
> > - * lock and dbs_mutex. cpu_hotplug lock should always be held before
> > - * dbs_mutex. If any function that can potentially take cpu_hotplug lock
> > - * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
> > - * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
> > - * is recursive for the same process. -Venki
> > - * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
> > - * would deadlock with cancel_delayed_work_sync(), which is needed for proper
> > - * raceless workqueue teardown.
> > - */
> > -static DEFINE_MUTEX(dbs_mutex);
> > -
> >  static struct workqueue_struct	*kondemand_wq;
> >  
> >  static struct dbs_tuners {
> > @@ -269,14 +255,10 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
> >  	int ret;
> >  	ret = sscanf(buf, "%u", &input);
> >  
> > -	mutex_lock(&dbs_mutex);
> > -	if (ret != 1) {
> > -		mutex_unlock(&dbs_mutex);
> > +	if (ret != 1)
> >  		return -EINVAL;
> > -	}
> > -	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
> > -	mutex_unlock(&dbs_mutex);
> >  
> > +	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
> >  	return count;
> >  }
> >  
> > @@ -287,16 +269,11 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
> >  	int ret;
> >  	ret = sscanf(buf, "%u", &input);
> >  
> > -	mutex_lock(&dbs_mutex);
> >  	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
> > -			input < MIN_FREQUENCY_UP_THRESHOLD) {
> > -		mutex_unlock(&dbs_mutex);
> > +			input < MIN_FREQUENCY_UP_THRESHOLD)
> >  		return -EINVAL;
> > -	}
> >  
> >  	dbs_tuners_ins.up_threshold = input;
> > -	mutex_unlock(&dbs_mutex);
> > -
> >  	return count;
> >  }
> >  
> > @@ -315,11 +292,9 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
> >  	if (input > 1)
> >  		input = 1;
> >  
> > -	mutex_lock(&dbs_mutex);
> > -	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
> > -		mutex_unlock(&dbs_mutex);
> > +	if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */
> >  		return count;
> > -	}
> > +
> >  	dbs_tuners_ins.ignore_nice = input;
> >  
> >  	/* we need to re-evaluate prev_cpu_idle */
> > @@ -332,8 +307,6 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
> >  			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
> >  
> >  	}
> > -	mutex_unlock(&dbs_mutex);
> > -
> >  	return count;
> >  }
> >  
> > @@ -350,10 +323,8 @@ static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
> >  	if (input > 1000)
> >  		input = 1000;
> >  
> > -	mutex_lock(&dbs_mutex);
> >  	dbs_tuners_ins.powersave_bias = input;
> >  	ondemand_powersave_bias_init();
> > -	mutex_unlock(&dbs_mutex);
> >  
> >  	return count;
> >  }
> > @@ -586,13 +557,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
> >  		if (this_dbs_info->enable) /* Already enabled */
> >  			break;
> >  
> > -		mutex_lock(&dbs_mutex);
> >  		dbs_enable++;
> >  
> >  		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
> >  		if (rc) {
> >  			dbs_enable--;
> > -			mutex_unlock(&dbs_mutex);
> >  			return rc;
> >  		}
> >  
> > @@ -627,28 +596,21 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
> >  			dbs_tuners_ins.sampling_rate = def_sampling_rate;
> >  		}
> >  		dbs_timer_init(this_dbs_info);
> > -
> > -		mutex_unlock(&dbs_mutex);
> >  		break;
> >  
> >  	case CPUFREQ_GOV_STOP:
> > -		mutex_lock(&dbs_mutex);
> >  		dbs_timer_exit(this_dbs_info);
> >  		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
> >  		dbs_enable--;
> > -		mutex_unlock(&dbs_mutex);
> > -
> >  		break;
> >  
> >  	case CPUFREQ_GOV_LIMITS:
> > -		mutex_lock(&dbs_mutex);
> >  		if (policy->max < this_dbs_info->cur_policy->cur)
> >  			__cpufreq_driver_target(this_dbs_info->cur_policy,
> >  				policy->max, CPUFREQ_RELATION_H);
> >  		else if (policy->min > this_dbs_info->cur_policy->cur)
> >  			__cpufreq_driver_target(this_dbs_info->cur_policy,
> >  				policy->min, CPUFREQ_RELATION_L);
> > -		mutex_unlock(&dbs_mutex);
> >  		break;
> >  	}
> >  	return 0;
> > -- 
> > 1.6.0.2
> > 
> > --
> > To unsubscribe from this list: send the line "unsubscribe cpufreq" in
> > the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
> > More majordomo info at  http://vger.kernel.org/majordomo-info.html
> > 
> > _______________________________________________
> > stable mailing list
> > stable-CPWUtch7KCBzeIdxy0IIJw@public.gmane.org
> > http://linux.kernel.org/mailman/listinfo/stable

-- 
Mathieu Desnoyers
OpenPGP key fingerprint: 8CD5 52C3 8E3C 4140 715F  BA06 3F25 A8FE 3BAE 9A68

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [stable] [PATCH 1/2] CPUFREQ: Remove unneeded dbs_mutexes from ondemand and conservative governors
  2009-06-30 23:14         ` Mathieu Desnoyers
@ 2009-06-30 23:39           ` Greg KH
       [not found]             ` <20090630233912.GA3709-U8xfFu+wG4EAvxtiuMwx3w@public.gmane.org>
  0 siblings, 1 reply; 15+ messages in thread
From: Greg KH @ 2009-06-30 23:39 UTC (permalink / raw)
  To: Mathieu Desnoyers
  Cc: Thomas Renninger, stable-DgEjT+Ai2ygdnm+yROfE0A,
	hidave.darkstar-Re5JQEeQqe8AvxtiuMwx3w,
	linux-kernel-u79uwXL29TY76Z2rM5mHXA,
	cpufreq-u79uwXL29TY76Z2rM5mHXA, rjw-KKrjLPT3xs0,
	penberg-bbCR+/B0CizivPeTLB3BmA, Venkatesh Pallipadi,
	davej-H+wXaHxf7aLQT0dZR+AlfA, mingo-X9Un+BFzKDI,
	kernel-testers-u79uwXL29TY76Z2rM5mHXA

On Tue, Jun 30, 2009 at 07:14:52PM -0400, Mathieu Desnoyers wrote:
> * Greg KH (greg-U8xfFu+wG4EAvxtiuMwx3w@public.gmane.org) wrote:
> > I don't see the patch below in Linus's tree.  If it's there, what is the
> > git commit id?
> > 
> 
> As I pointed out in an earlier reply, this patch is bogus and adds racy
> data structure updates. It should not be merged.

Ok, dropped.

thanks,

greg k-h

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [stable] [PATCH 1/2] CPUFREQ: Remove unneeded dbs_mutexes from ondemand and conservative governors
       [not found]             ` <20090630233912.GA3709-U8xfFu+wG4EAvxtiuMwx3w@public.gmane.org>
@ 2009-07-01  9:07               ` Thomas Renninger
  0 siblings, 0 replies; 15+ messages in thread
From: Thomas Renninger @ 2009-07-01  9:07 UTC (permalink / raw)
  To: Greg KH
  Cc: Mathieu Desnoyers, stable-DgEjT+Ai2ygdnm+yROfE0A,
	hidave.darkstar-Re5JQEeQqe8AvxtiuMwx3w,
	linux-kernel-u79uwXL29TY76Z2rM5mHXA,
	cpufreq-u79uwXL29TY76Z2rM5mHXA, rjw-KKrjLPT3xs0,
	penberg-bbCR+/B0CizivPeTLB3BmA, Venkatesh Pallipadi,
	davej-H+wXaHxf7aLQT0dZR+AlfA, mingo-X9Un+BFzKDI,
	kernel-testers-u79uwXL29TY76Z2rM5mHXA

On Wednesday 01 July 2009 01:39:12 Greg KH wrote:
> On Tue, Jun 30, 2009 at 07:14:52PM -0400, Mathieu Desnoyers wrote:
> > * Greg KH (greg-U8xfFu+wG4EAvxtiuMwx3w@public.gmane.org) wrote:
> > > I don't see the patch below in Linus's tree.  If it's there, what is the
> > > git commit id?
> > > 
> > 
> > As I pointed out in an earlier reply, this patch is bogus and adds racy
> > data structure updates. It should not be merged.
> 
> Ok, dropped.

Yes, sorry for not mentioning.

I looked at it again, but gave up after a while, I am not able
to provide a safe .30 fix for that, risk of making things worse
is too high...
My last thought was that the main culprit is that .governor() should
always be called with the rwsem held. I'll look into it further and try
to ease up things for future kernels, but can't spend that much
time on it currently.

   Thomas

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH 1/2] CPUFREQ: Remove unneeded dbs_mutexes from ondemand and conservative governors
       [not found]       ` <20090630063339.GF1351-+ZI9xUNit7I@public.gmane.org>
@ 2009-07-03 10:10         ` Thomas Renninger
  2009-07-05 19:46           ` Pavel Machek
  0 siblings, 1 reply; 15+ messages in thread
From: Thomas Renninger @ 2009-07-03 10:10 UTC (permalink / raw)
  To: Pavel Machek
  Cc: kernel-soeCzev1AWYdnm+yROfE0A, cpufreq-u79uwXL29TY76Z2rM5mHXA,
	linux-kernel-u79uwXL29TY76Z2rM5mHXA, mingo-X9Un+BFzKDI,
	rjw-KKrjLPT3xs0, hidave.darkstar-Re5JQEeQqe8AvxtiuMwx3w,
	penberg-bbCR+/B0CizivPeTLB3BmA,
	kernel-testers-u79uwXL29TY76Z2rM5mHXA,
	davej-H+wXaHxf7aLQT0dZR+AlfA,
	mathieu.desnoyers-scC8bbJcJLCw5LPnMra/2Q, Venkatesh Pallipadi

Hi Pavel,

On Tuesday 30 June 2009 08:33:39 Pavel Machek wrote:
> On Thu 2009-06-25 16:01:24, Thomas Renninger wrote:
> > Comment from Venkatesh:
> > ...
> > This mutex is just serializing the changes to those variables. I could't
> > think of any functionality issues of not having the lock as such.
> > 
> > -> rip it out.
> > 
> > CC: Venkatesh Pallipadi <venkatesh.pallipadi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> > Signed-off-by: Thomas Renninger <trenn-l3A5Bk7waGM@public.gmane.org>
> 
> >  static struct dbs_tuners {
> > @@ -236,10 +222,7 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
> >  	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
> >  		return -EINVAL;
> >  
> > -	mutex_lock(&dbs_mutex);
> >  	dbs_tuners_ins.sampling_down_factor = input;
> > -	mutex_unlock(&dbs_mutex);
> > -
> 
> You'd need to make s_down_factor atomic_t for this to work....
Can you provide a userspace scenario (or tell which kind of event must
happen in between), that this would cause problems, please.

Thanks,

  Thomas

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH 1/2] CPUFREQ: Remove unneeded dbs_mutexes from ondemand and conservative governors
  2009-07-03 10:10         ` Thomas Renninger
@ 2009-07-05 19:46           ` Pavel Machek
  0 siblings, 0 replies; 15+ messages in thread
From: Pavel Machek @ 2009-07-05 19:46 UTC (permalink / raw)
  To: Thomas Renninger
  Cc: kernel, cpufreq, linux-kernel, mingo, rjw, hidave.darkstar,
	penberg, kernel-testers, davej, mathieu.desnoyers,
	Venkatesh Pallipadi

On Fri 2009-07-03 12:10:15, Thomas Renninger wrote:
> Hi Pavel,
> 
> On Tuesday 30 June 2009 08:33:39 Pavel Machek wrote:
> > On Thu 2009-06-25 16:01:24, Thomas Renninger wrote:
> > > Comment from Venkatesh:
> > > ...
> > > This mutex is just serializing the changes to those variables. I couldn't
> > > think of any functionality issues of not having the lock as such.
> > > 
> > > -> rip it out.
> > > 
> > > CC: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
> > > Signed-off-by: Thomas Renninger <trenn@suse.de>
> > 
> > >  static struct dbs_tuners {
> > > @@ -236,10 +222,7 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
> > >  	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
> > >  		return -EINVAL;
> > >  
> > > -	mutex_lock(&dbs_mutex);
> > >  	dbs_tuners_ins.sampling_down_factor = input;
> > > -	mutex_unlock(&dbs_mutex);
> > > -
> > 
> > You'd need to make s_down_factor atomic_t for this to work....
> Can you provide a userspace scenario (or tell which kind of event must
> happen in between) in which this would cause problems, please.


Imagine 

dbs_tuners_ins.sampling_down_factor = 0xd0000;
input = 0xabcd;

...then other threads can see 0xdabcd if they read at a "bad"
moment. Not on i386, but this is generic code (right?). Just use
atomic_t.
									Pavel   

-- 
(english) http://www.livejournal.com/~pavelmachek
(cesky, pictures) http://atrey.karlin.mff.cuni.cz/~pavel/picture/horses/blog.html

^ permalink raw reply	[flat|nested] 15+ messages in thread

end of thread, other threads:[~2009-07-05 19:46 UTC | newest]

Thread overview: 15+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
     [not found] <20090623193215.GA31374@elte.hu>
     [not found] ` <20090623193215.GA31374-X9Un+BFzKDI@public.gmane.org>
2009-06-25 14:01   ` Fix dead lock in cpufreq for CPU hotplug and suspend for 2.6.30.stable Thomas Renninger
     [not found]     ` <1245938485-12663-1-git-send-email-trenn-l3A5Bk7waGM@public.gmane.org>
2009-06-25 14:06       ` Thomas Renninger
2009-06-25 14:01 ` [PATCH 1/2] CPUFREQ: Remove unneeded dbs_mutexes from ondemand and conservative governors Thomas Renninger
     [not found]   ` <1245938485-12663-2-git-send-email-trenn-l3A5Bk7waGM@public.gmane.org>
2009-06-25 14:25     ` Mathieu Desnoyers
2009-06-25 15:03       ` Pallipadi, Venkatesh
2009-06-25 22:17       ` Thomas Renninger
     [not found]         ` <200906260017.10730.trenn-l3A5Bk7waGM@public.gmane.org>
2009-06-25 22:26           ` Thomas Renninger
2009-06-30  6:33     ` Pavel Machek
     [not found]       ` <20090630063339.GF1351-+ZI9xUNit7I@public.gmane.org>
2009-07-03 10:10         ` Thomas Renninger
2009-07-05 19:46           ` Pavel Machek
2009-06-30 22:58     ` [stable] " Greg KH
     [not found]       ` <20090630225813.GB2634-U8xfFu+wG4EAvxtiuMwx3w@public.gmane.org>
2009-06-30 23:14         ` Mathieu Desnoyers
2009-06-30 23:39           ` Greg KH
     [not found]             ` <20090630233912.GA3709-U8xfFu+wG4EAvxtiuMwx3w@public.gmane.org>
2009-07-01  9:07               ` Thomas Renninger
2009-06-25 14:01 ` [PATCH 2/2] remove rwsem lock from CPUFREQ_GOV_STOP call (second call site) Thomas Renninger

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).