Merge branch 'master' into upstream
[powerpc.git] drivers/cpufreq/cpufreq_ondemand.c
index 69aa1db..3e6ffca 100644
@@ -74,6 +74,8 @@ static unsigned int dbs_enable;       /* number of CPUs using this policy */
 static DEFINE_MUTEX (dbs_mutex);
 static DECLARE_WORK    (dbs_work, do_dbs_timer, NULL);
 
+static struct workqueue_struct *dbs_workq;
+
 struct dbs_tuners {
        unsigned int sampling_rate;
        unsigned int sampling_down_factor;
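
The hunk above gives the governor a private workqueue handle. Until this patch the sampling work ran on the shared keventd queue via schedule_delayed_work(), where a slow frequency transition could delay unrelated kernel work; a dedicated single-threaded queue keeps it isolated. A minimal sketch of the pattern on this era's API (three-argument DECLARE_WORK, <linux/workqueue.h>); my_wq, my_work and my_handler are illustrative names, not from the patch:

static struct workqueue_struct *my_wq;

static void my_handler(void *data)
{
        /* periodic sampling work would go here */
}
static DECLARE_WORK(my_work, my_handler, NULL);

static int my_start(void)
{
        my_wq = create_singlethread_workqueue("mydrv");
        if (!my_wq)
                return -ENOMEM;
        /* first run roughly one second from now */
        queue_delayed_work(my_wq, &my_work, HZ);
        return 0;
}
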
@@ -84,6 +86,7 @@ struct dbs_tuners {
 static struct dbs_tuners dbs_tuners_ins = {
        .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
        .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
+       .ignore_nice = 0,
 };
 
 static inline unsigned int get_cpu_idle_time(unsigned int cpu)
@@ -350,6 +353,9 @@ static void dbs_check_cpu(int cpu)
        freq_next = (freq_next * policy->cur) /
                        (dbs_tuners_ins.up_threshold - 10);
 
+       if (freq_next < policy->min)
+               freq_next = policy->min;
+
        if (freq_next <= ((policy->cur * 95) / 100))
                __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
 }
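
The new clamp keeps the proportional estimate from undershooting the policy floor. At this point freq_next holds the measured busy percentage (as far as the surrounding code shows), so with the default up_threshold of 80 the divisor is 80 - 10 = 70: a 20% load at a current frequency of 2000 MHz gives freq_next = 20 * 2000 / 70, about 571 MHz. If the policy minimum is 800 MHz, the clamp raises that to 800 before the 95%-of-current test decides whether the switch is worth making.
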
@@ -360,23 +366,29 @@ static void do_dbs_timer(void *data)
        mutex_lock(&dbs_mutex);
        for_each_online_cpu(i)
                dbs_check_cpu(i);
-       schedule_delayed_work(&dbs_work,
-                       usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
+       queue_delayed_work(dbs_workq, &dbs_work,
+                          usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
        mutex_unlock(&dbs_mutex);
 }
 
 static inline void dbs_timer_init(void)
 {
        INIT_WORK(&dbs_work, do_dbs_timer, NULL);
-       schedule_delayed_work(&dbs_work,
-                       usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
+       if (!dbs_workq)
+               dbs_workq = create_singlethread_workqueue("ondemand");
+       if (!dbs_workq) {
+               printk(KERN_ERR "ondemand: Cannot initialize kernel thread\n");
+               return;
+       }
+       queue_delayed_work(dbs_workq, &dbs_work,
+                          usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
        return;
 }
 
 static inline void dbs_timer_exit(void)
 {
-       cancel_delayed_work(&dbs_work);
-       return;
+       if (dbs_workq)
+               cancel_rearming_delayed_workqueue(dbs_workq, &dbs_work);
 }
 
 static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
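
do_dbs_timer() now re-queues itself on the private queue every sampling period, and that self-rearming is exactly what a plain cancel_delayed_work() cannot stop reliably: the cancel can land while the handler is mid-run and about to re-arm. cancel_rearming_delayed_workqueue() cancels the pending timer and also waits until a running handler can no longer re-queue itself. A sketch of the race-free stop, reusing the illustrative names from the earlier sketch:

static unsigned long my_period = HZ;    /* illustrative: one second */

static void my_handler(void *data)
{
        /* ... sample load, retarget the frequency ... */

        /* self-rearm: this is what makes a bare cancel racy */
        queue_delayed_work(my_wq, &my_work, my_period);
}

static void my_stop(void)
{
        /* cancels the timer AND waits out a handler that is already
         * running, so it cannot re-arm behind our back */
        cancel_rearming_delayed_workqueue(my_wq, &my_work);
}
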
@@ -395,8 +407,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                        return -EINVAL;
 
                if (policy->cpuinfo.transition_latency >
-                               (TRANSITION_LATENCY_LIMIT * 1000))
+                               (TRANSITION_LATENCY_LIMIT * 1000)) {
+                       printk(KERN_WARNING "ondemand governor failed to load "
+                              "because the transition latency is too long\n");
                        return -EINVAL;
+               }
                if (this_dbs_info->enable) /* Already enabled */
                        break;
 
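
A note on units in the latency check: cpuinfo.transition_latency is reported by the cpufreq driver in nanoseconds, and the "* 1000" suggests TRANSITION_LATENCY_LIMIT is expressed in microseconds. Hardware that switches too slowly cannot keep up with ondemand's sampling, so the governor refuses it; the new printk finally says why instead of returning a bare -EINVAL. For illustration, how a driver would land on the right side of the check (my_cpu_init is a made-up driver init, not from this file):

static int my_cpu_init(struct cpufreq_policy *policy)
{
        /* reported in nanoseconds: 100 us here, comfortably fast */
        policy->cpuinfo.transition_latency = 100 * 1000;
        return 0;
}
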
@@ -431,8 +446,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                def_sampling_rate = MIN_STAT_SAMPLING_RATE;
 
                        dbs_tuners_ins.sampling_rate = def_sampling_rate;
-                       dbs_tuners_ins.ignore_nice = 0;
-
                        dbs_timer_init();
                }
 
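
The two lines removed here are the counterpart of the ".ignore_nice = 0" added to the static initializer earlier. The removed assignment ran inside the first-CPU enable path, so every stop/start cycle of the governor silently reset a value the user may have tuned through sysfs; initializing the field once, at the definition, lets the setting survive governor restarts. The patch's approach, in isolation:

/* one-time default at the definition; later sysfs writes persist */
static struct dbs_tuners dbs_tuners_ins = {
        .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
        .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
        .ignore_nice = 0,       /* nice time counts as busy by default */
};
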
@@ -484,8 +497,12 @@ static int __init cpufreq_gov_dbs_init(void)
 
 static void __exit cpufreq_gov_dbs_exit(void)
 {
-       /* Make sure that the scheduled work is indeed not running */
-       flush_scheduled_work();
+       /* Make sure that the scheduled work is indeed not running.
+          Assumes the timer has been cancelled first. */
+       if (dbs_workq) {
+               flush_workqueue(dbs_workq);
+               destroy_workqueue(dbs_workq);
+       }
 
        cpufreq_unregister_governor(&cpufreq_gov_dbs);
 }
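
The exit path mirrors the init: nothing may still be queued or running when the module text is freed, so the queue is drained and destroyed (destroy_workqueue() flushes on its own, if memory serves, making the explicit flush_workqueue() belt and braces). The comment's precondition matters: by module-exit time every policy has stopped the governor, so dbs_timer_exit() has already cancelled the rearming work and the flush cannot race with it. Condensed, with the illustrative names as before:

static void my_exit(void)
{
        if (my_wq) {
                flush_workqueue(my_wq);         /* drain anything in flight */
                destroy_workqueue(my_wq);       /* reap the worker thread */
                my_wq = NULL;
        }
}
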