X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=drivers%2Fcpufreq%2Fcpufreq_conservative.c;h=b3ebc8f019753db87d072914b1d699e9f36c85eb;hb=1eb68b990aab4c007e520acae39c74d8116693bc;hp=037f6bf4543c3c29b98e6401d8cb242b85a2f993;hpb=d1127e40e8d75cd3855e35424937c73d0bcec558;p=powerpc.git

diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 037f6bf454..b3ebc8f019 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -22,6 +22,7 @@
 #include <linux/types.h>
 #include <linux/fs.h>
 #include <linux/sysfs.h>
+#include <linux/cpu.h>
 #include <linux/sched.h>
 #include <linux/kmod.h>
 #include <linux/workqueue.h>
@@ -72,6 +73,14 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
+/*
+ * DEADLOCK ALERT! There is an ordering requirement between the cpu_hotplug
+ * lock and dbs_mutex. The cpu_hotplug lock should always be held before
+ * dbs_mutex. If any function that can potentially take the cpu_hotplug lock
+ * (like __cpufreq_driver_target()) is called with dbs_mutex taken, then the
+ * cpu_hotplug lock should be taken before that. Note that the cpu_hotplug
+ * lock is recursive for the same process. -Venki
+ */
 static DEFINE_MUTEX (dbs_mutex);
 static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
 
@@ -176,8 +185,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 	ret = sscanf (buf, "%u", &input);
 
 	mutex_lock(&dbs_mutex);
-	if (ret != 1 || input > 100 || input < 0 ||
-			input <= dbs_tuners_ins.down_threshold) {
+	if (ret != 1 || input > 100 || input <= dbs_tuners_ins.down_threshold) {
 		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
@@ -196,8 +204,7 @@ static ssize_t store_down_threshold(struct cpufreq_policy *unused,
 	ret = sscanf (buf, "%u", &input);
 
 	mutex_lock(&dbs_mutex);
-	if (ret != 1 || input > 100 || input < 0 ||
-			input >= dbs_tuners_ins.up_threshold) {
+	if (ret != 1 || input > 100 || input >= dbs_tuners_ins.up_threshold) {
 		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
@@ -416,12 +423,14 @@ static void dbs_check_cpu(int cpu)
 static void do_dbs_timer(void *data)
 {
 	int i;
+	lock_cpu_hotplug();
 	mutex_lock(&dbs_mutex);
 	for_each_online_cpu(i)
 		dbs_check_cpu(i);
 	schedule_delayed_work(&dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	mutex_unlock(&dbs_mutex);
+	unlock_cpu_hotplug();
 }
 
 static inline void dbs_timer_init(void)
@@ -516,6 +525,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
+		lock_cpu_hotplug();
 		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
@@ -526,6 +536,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 					this_dbs_info->cur_policy,
 					policy->min, CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
+		unlock_cpu_hotplug();
 		break;
 	}
 	return 0;
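
The DEADLOCK ALERT comment added above encodes a simple lock-ordering rule: whenever dbs_mutex is held around code that may itself take the cpu_hotplug lock (such as __cpufreq_driver_target()), the cpu_hotplug lock must already be held. The sketch below is not part of the patch; it is a minimal illustration of that ordering, assuming the 2.6.17-era lock_cpu_hotplug()/unlock_cpu_hotplug() interface from <linux/cpu.h>, with example_dbs_mutex and example_sampling_path() as made-up stand-ins for dbs_mutex and the governor's sampling path.

/* Illustration only -- not part of the patch. */
#include <linux/cpu.h>		/* lock_cpu_hotplug(), unlock_cpu_hotplug() */
#include <linux/mutex.h>

static DEFINE_MUTEX(example_dbs_mutex);		/* stand-in for dbs_mutex */

static void example_sampling_path(void)
{
	lock_cpu_hotplug();			/* outer lock: CPU hotplug */
	mutex_lock(&example_dbs_mutex);		/* inner lock: governor state */

	/*
	 * Work that may end up calling __cpufreq_driver_target(), which can
	 * take the cpu_hotplug lock again, goes here; per the comment above,
	 * that lock is recursive for the same task, so this nesting is safe.
	 */

	mutex_unlock(&example_dbs_mutex);
	unlock_cpu_hotplug();			/* release in reverse order */
}

This is exactly the ordering the do_dbs_timer() and CPUFREQ_GOV_LIMITS hunks establish: take the hotplug lock, then dbs_mutex, and release them in reverse, so no path can acquire the two locks in the opposite order.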
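
The two threshold hunks also drop the old "input < 0" test. Because input is read with sscanf()'s %u conversion into an unsigned variable, that comparison can never be true, so removing it is purely a cleanup with no behavioural change. A small user-space illustration (plain C, not from the patch) of why the check was dead code:

#include <stdio.h>

int main(void)
{
	unsigned int input;

	if (sscanf("150", "%u", &input) != 1)
		return 1;

	/*
	 * For an unsigned int, "input < 0" always evaluates to 0, so the
	 * removed test added nothing beyond the "input > 100" range check.
	 */
	printf("input=%u  (input < 0) -> %d  (input > 100) -> %d\n",
	       input, input < 0, input > 100);
	return 0;
}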