author    codeworkx <codeworkx@cyanogenmod.org>  2013-02-19 17:37:40 +0000
committer codeworkx <codeworkx@cyanogenmod.org>  2013-02-19 17:37:40 +0000
commit    9ca9e302cafc02d26a54df4221e540fdd917ade6 (patch)
tree      ec24b9afaeaf6d7b186f36a95e0ab77616665e08
parent    d19547a5b4bf019b90ec5674a7693c2927c530d2 (diff)
cpufreq: interactive: bring in line with omapzoom
Change-Id: Ie2f8ac4ae210470532b7906daaad0e81108ec5f3
-rw-r--r--  drivers/cpufreq/cpufreq_interactive.c  |  453
1 file changed, 347 insertions(+), 106 deletions(-)
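
What the patch does, in brief: in place of the early-suspend screen-off differential, each CPU now keeps a circular buffer of recent load samples, averages it over sampling_periods timer ticks, and switches the governor between LOW_POWER_TUNE, DEFAULT_TUNE, and HIGH_PERF_TUNE via a workqueue. The sketch below is a minimal userspace model of that averaging and selection step, assuming the patch's default thresholds; pick_tune() is a hypothetical helper, not a kernel symbol, and it omits the shorter low_power_rate window and the deferred retune.

#include <stdio.h>

/* Defaults mirroring the patch (DEFAULT_SAMPLING_PERIODS,
 * DEFAULT_HI_PERF_THRESHOLD, DEFAULT_LOW_POWER_THRESHOLD). */
#define SAMPLING_PERIODS    10
#define HI_PERF_THRESHOLD   80
#define LOW_POWER_THRESHOLD 35

enum tune_values { LOW_POWER_TUNE, DEFAULT_TUNE, HIGH_PERF_TUNE };

/* Hypothetical helper: record one load sample in the circular buffer and
 * return the tune implied by the full-window average. The kernel loop
 * walks backwards from the write index; summing the whole buffer gives
 * the same total. */
static enum tune_values pick_tune(unsigned int *history, unsigned int *idx,
                                  unsigned int load)
{
    unsigned int total = 0;

    history[*idx] = load;
    for (unsigned int i = 0; i < SAMPLING_PERIODS; i++)
        total += history[i];
    if (++*idx == SAMPLING_PERIODS)   /* wrap, as the patch does */
        *idx = 0;

    unsigned int avg = total / SAMPLING_PERIODS;
    if (avg > HI_PERF_THRESHOLD)
        return HIGH_PERF_TUNE;
    if (avg < LOW_POWER_THRESHOLD)
        return LOW_POWER_TUNE;
    return DEFAULT_TUNE;
}

int main(void)
{
    unsigned int history[SAMPLING_PERIODS] = { 0 };
    unsigned int idx = 0;
    unsigned int samples[] = { 20, 30, 90, 95, 92, 96, 91, 94, 93, 97, 98, 99 };

    for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("load %3u -> tune %d\n", samples[i],
               pick_tune(history, &idx, samples[i]));
    return 0;
}

In the kernel path the chosen tune only takes effect in cpufreq_interactive_tune(), which raises go_hispeed_load, min_sample_time, and hispeed_freq for HIGH_PERF_TUNE and lowers hispeed_freq toward policy->min for LOW_POWER_TUNE.
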
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index 6df60b342c1..e0552468822 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -30,14 +30,15 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <asm/cputime.h>
-#ifdef CONFIG_HAS_EARLYSUSPEND
-#include <linux/earlysuspend.h>
-#endif
-
#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+
static atomic_t active_count = ATOMIC_INIT(0);
struct cpufreq_interactive_cpuinfo {
@@ -56,6 +57,12 @@ struct cpufreq_interactive_cpuinfo {
u64 floor_validate_time;
u64 hispeed_validate_time;
int governor_enabled;
+ unsigned int *load_history;
+ unsigned int history_load_index;
+ unsigned int total_avg_load;
+ unsigned int total_load_history;
+ unsigned int low_power_rate_history;
+ unsigned int cpu_tune_value;
};
static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
@@ -70,21 +77,40 @@ static cpumask_t down_cpumask;
static spinlock_t down_cpumask_lock;
static struct mutex set_speed_lock;
+static struct workqueue_struct *tune_wq;
+static struct work_struct tune_work;
+static cpumask_t tune_cpumask;
+static spinlock_t tune_cpumask_lock;
+
+static unsigned int sampling_periods;
+static unsigned int low_power_threshold;
+static unsigned int hi_perf_threshold;
+static unsigned int low_power_rate;
+static enum tune_values {
+ LOW_POWER_TUNE = 0,
+ DEFAULT_TUNE,
+ HIGH_PERF_TUNE
+} cur_tune_value;
+
+#define MIN_GO_HISPEED_LOAD 70
+#define DEFAULT_LOW_POWER_RATE 10
+
+/* default number of sampling periods to average before a tune decision */
+#define DEFAULT_SAMPLING_PERIODS 10
+#define DEFAULT_HI_PERF_THRESHOLD 80
+#define DEFAULT_LOW_POWER_THRESHOLD 35
+#define MAX_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
+
/* Hi speed to bump to from lo speed when load burst (default max) */
static u64 hispeed_freq;
/* Go to hi speed when CPU load at or above this value. */
-#define DEFAULT_GO_HISPEED_LOAD 85
+#define DEFAULT_GO_HISPEED_LOAD 95
static unsigned long go_hispeed_load;
-
-#ifdef CONFIG_HAS_EARLYSUSPEND
-static unsigned long screen_off_differential;
-#endif
-
/*
* The minimum amount of time to spend at a frequency before we can ramp down.
*/
-#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
+#define DEFAULT_MIN_SAMPLE_TIME (20 * USEC_PER_MSEC)
static unsigned long min_sample_time;
/*
@@ -92,6 +118,9 @@ static unsigned long min_sample_time;
*/
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;
+#ifdef CONFIG_OMAP4_DPLL_CASCADING
+static unsigned long default_timer_rate;
+#endif
/*
* Wait this long before raising speed above hispeed, by default a single
@@ -132,6 +161,19 @@ struct cpufreq_governor cpufreq_gov_interactive = {
.owner = THIS_MODULE,
};
+#ifdef CONFIG_OMAP4_DPLL_CASCADING
+void cpufreq_interactive_set_timer_rate(unsigned long val, unsigned int reset)
+{
+ if (!reset) {
+ default_timer_rate = timer_rate;
+ timer_rate = val;
+ } else {
+ if (timer_rate == val)
+ timer_rate = default_timer_rate;
+ }
+}
+#endif
+
static void cpufreq_interactive_timer(unsigned long data)
{
unsigned int delta_idle;
@@ -143,8 +185,8 @@ static void cpufreq_interactive_timer(unsigned long data)
struct cpufreq_interactive_cpuinfo *pcpu =
&per_cpu(cpuinfo, data);
u64 now_idle;
- unsigned int new_freq;
- unsigned int index;
+ unsigned int new_freq, new_tune_value;
+ unsigned int index, i, j;
unsigned long flags;
smp_rmb();
@@ -203,6 +245,63 @@ static void cpufreq_interactive_timer(unsigned long data)
*/
if (load_since_change > cpu_load)
cpu_load = load_since_change;
+ pcpu->load_history[pcpu->history_load_index] = cpu_load;
+
+ pcpu->total_load_history = 0;
+ pcpu->low_power_rate_history = 0;
+
+ /* compute average load across the full and low-power sampling windows */
+ for (i = 0, j = pcpu->history_load_index;
+ i < sampling_periods; i++, j--) {
+ pcpu->total_load_history += pcpu->load_history[j];
+ if (low_power_rate < sampling_periods)
+ if (i < low_power_rate)
+ pcpu->low_power_rate_history
+ += pcpu->load_history[j];
+ if (j == 0)
+ j = sampling_periods;
+ }
+
+ /* return to first element if we're at the circular buffer's end */
+ if (++pcpu->history_load_index == sampling_periods)
+ pcpu->history_load_index = 0;
+ else if (unlikely(pcpu->history_load_index > sampling_periods)) {
+ /*
+ * This is not supposed to happen; if we get here,
+ * something is wrong.
+ */
+ pr_err("%s: history index went past the allocated buffer!\n",
+ __func__);
+ pcpu->history_load_index = 0;
+ }
+
+ pcpu->total_avg_load = pcpu->total_load_history / sampling_periods;
+
+ if (pcpu->total_avg_load > hi_perf_threshold)
+ new_tune_value = HIGH_PERF_TUNE;
+ else if (pcpu->total_avg_load < low_power_threshold)
+ new_tune_value = LOW_POWER_TUNE;
+ else
+ new_tune_value = DEFAULT_TUNE;
+
+ if (new_tune_value != cur_tune_value)
+ if ((pcpu->cpu_tune_value != new_tune_value)
+ && ((new_tune_value == HIGH_PERF_TUNE)
+ || (new_tune_value == LOW_POWER_TUNE))) {
+ spin_lock_irqsave(&tune_cpumask_lock, flags);
+ cpumask_set_cpu(data, &tune_cpumask);
+ spin_unlock_irqrestore(&tune_cpumask_lock, flags);
+ queue_work(tune_wq, &tune_work);
+ }
+ pcpu->cpu_tune_value = new_tune_value;
+
+ if (cur_tune_value == LOW_POWER_TUNE) {
+ if (low_power_rate < sampling_periods)
+ cpu_load = pcpu->low_power_rate_history
+ / low_power_rate;
+ else
+ cpu_load = pcpu->total_avg_load;
+ }
if (cpu_load >= go_hispeed_load || boost_val) {
if (pcpu->target_freq <= pcpu->policy->min) {
@@ -225,7 +324,7 @@ static void cpufreq_interactive_timer(unsigned long data)
}
}
} else {
- new_freq = pcpu->policy->cur * cpu_load / 100;
+ new_freq = pcpu->policy->max * cpu_load / 100;
}
if (new_freq <= hispeed_freq)
@@ -317,6 +416,63 @@ exit:
return;
}
+static void cpufreq_interactive_tune(struct work_struct *work)
+{
+ unsigned int cpu;
+ cpumask_t tmp_mask;
+ unsigned long flags;
+ struct cpufreq_interactive_cpuinfo *pcpu;
+
+ unsigned int max_total_avg_load = 0;
+ unsigned int index;
+
+ spin_lock_irqsave(&tune_cpumask_lock, flags);
+ tmp_mask = tune_cpumask;
+ cpumask_clear(&tune_cpumask);
+ spin_unlock_irqrestore(&tune_cpumask_lock, flags);
+
+ for_each_cpu(cpu, &tmp_mask) {
+ unsigned int j;
+
+ pcpu = &per_cpu(cpuinfo, cpu);
+ smp_rmb();
+
+ if (!pcpu->governor_enabled)
+ continue;
+
+ mutex_lock(&set_speed_lock);
+
+ for_each_cpu(j, pcpu->policy->cpus) {
+ struct cpufreq_interactive_cpuinfo *pjcpu =
+ &per_cpu(cpuinfo, j);
+
+ if (pjcpu->total_avg_load > max_total_avg_load)
+ max_total_avg_load = pjcpu->total_avg_load;
+ }
+
+ if ((max_total_avg_load > hi_perf_threshold)
+ && (cur_tune_value != HIGH_PERF_TUNE)) {
+ cur_tune_value = HIGH_PERF_TUNE;
+ go_hispeed_load = MIN_GO_HISPEED_LOAD;
+ min_sample_time = MAX_MIN_SAMPLE_TIME;
+ hispeed_freq = pcpu->policy->max;
+ } else if ((max_total_avg_load < low_power_threshold)
+ && (cur_tune_value != LOW_POWER_TUNE)) {
+ /* Scale performance back down */
+ go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
+ min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
+ cpufreq_frequency_table_target(pcpu->policy,
+ pcpu->freq_table, pcpu->policy->min,
+ CPUFREQ_RELATION_H, &index);
+ hispeed_freq =
+ pcpu->freq_table[index+1].frequency;
+ cur_tune_value = LOW_POWER_TUNE;
+ }
+ mutex_unlock(&set_speed_lock);
+ }
+
+}
+
static void cpufreq_interactive_idle_start(void)
{
struct cpufreq_interactive_cpuinfo *pcpu =
@@ -408,7 +564,6 @@ static int cpufreq_interactive_up_task(void *data)
unsigned long flags;
struct cpufreq_interactive_cpuinfo *pcpu;
- pr_info("[%s] start!!\n", __func__);
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irqsave(&up_cpumask_lock, flags);
@@ -417,10 +572,8 @@ static int cpufreq_interactive_up_task(void *data)
spin_unlock_irqrestore(&up_cpumask_lock, flags);
schedule();
- if (kthread_should_stop()) {
- pr_info("[%s] should stop!!\n", __func__);
+ if (kthread_should_stop())
break;
- }
spin_lock_irqsave(&up_cpumask_lock, flags);
}
@@ -555,7 +708,6 @@ static void cpufreq_interactive_input_event(struct input_handle *handle,
trace_cpufreq_interactive_boost("input");
cpufreq_interactive_boost();
}
-
}
static void cpufreq_interactive_input_open(struct work_struct *w)
@@ -678,31 +830,6 @@ static ssize_t store_go_hispeed_load(struct kobject *kobj,
static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
show_go_hispeed_load, store_go_hispeed_load);
-#ifdef CONFIG_HAS_EARLYSUSPEND
-static ssize_t show_screen_off_differential(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- return sprintf(buf, "%lu\n", screen_off_differential);
-}
-
-static ssize_t store_screen_off_differential(struct kobject *kobj,
- struct attribute *attr, const char *buf, size_t count)
-{
- int ret;
- unsigned long val;
-
- ret = strict_strtoul(buf, 0, &val);
- if (ret < 0)
- return ret;
- screen_off_differential = val;
- return count;
-}
-static struct global_attr screen_off_differential_attr =
- __ATTR(screen_off_differential,
- 0644, show_screen_off_differential,
- store_screen_off_differential);
-#endif
-
static ssize_t show_min_sample_time(struct kobject *kobj,
struct attribute *attr, char *buf)
{
@@ -835,45 +962,157 @@ static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
return count;
}
+static struct global_attr boostpulse =
+ __ATTR(boostpulse, 0200, NULL, store_boostpulse);
-#ifdef CONFIG_HAS_EARLYSUSPEND
-static unsigned long save_go_hispeed_load;
-static void cpufreq_early_suspend(struct early_suspend *h)
+static ssize_t show_sampling_periods(struct kobject *kobj,
+ struct attribute *attr, char *buf)
{
- save_go_hispeed_load = go_hispeed_load;
- go_hispeed_load += screen_off_differential;
- return;
+ return sprintf(buf, "%u\n", sampling_periods);
}
-static void cpufreq_late_resume(struct early_suspend *h)
+static ssize_t store_sampling_periods(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
{
+ int ret;
+ unsigned int val, t_mask = 0;
+ unsigned int *temp;
+ unsigned int j, i;
+ struct cpufreq_interactive_cpuinfo *pcpu;
- go_hispeed_load = save_go_hispeed_load;
- return;
+ ret = sscanf(buf, "%u", &val);
+ if (ret != 1)
+ return -EINVAL;
+
+ if (val == sampling_periods)
+ return count;
+
+ mutex_lock(&set_speed_lock);
+
+ for_each_present_cpu(j) {
+ pcpu = &per_cpu(cpuinfo, j);
+ ret = del_timer_sync(&pcpu->cpu_timer);
+ if (ret)
+ t_mask |= BIT(j);
+ pcpu->history_load_index = 0;
+
+ temp = kmalloc((sizeof(unsigned int) * val), GFP_KERNEL);
+ if (!temp) {
+ pr_err("%s:can't allocate memory for history\n",
+ __func__);
+ count = -ENOMEM;
+ goto out;
+ }
+ memcpy(temp, pcpu->load_history,
+ (min(sampling_periods, val) * sizeof(unsigned int)));
+ if (val > sampling_periods)
+ for (i = sampling_periods; i < val; i++)
+ temp[i] = 50;
+
+ kfree(pcpu->load_history);
+ pcpu->load_history = temp;
+ }
+
+out:
+ if (!((ssize_t)count < 0 && val > sampling_periods))
+ sampling_periods = val;
+
+ for_each_online_cpu(j) {
+ pcpu = &per_cpu(cpuinfo, j);
+ if (t_mask & BIT(j))
+ mod_timer(&pcpu->cpu_timer,
+ jiffies + usecs_to_jiffies(timer_rate));
+ }
+
+ mutex_unlock(&set_speed_lock);
+
+ return count;
}
-static struct early_suspend interactive_early_suspend = {
- .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 5,
- .suspend = cpufreq_early_suspend,
- .resume = cpufreq_late_resume,
-};
-#endif
+static struct global_attr sampling_periods_attr = __ATTR(sampling_periods,
+ 0644, show_sampling_periods, store_sampling_periods);
+
+static ssize_t show_hi_perf_threshold(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", hi_perf_threshold);
+}
+
+static ssize_t store_hi_perf_threshold(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ hi_perf_threshold = val;
+ return count;
+}
+
+static struct global_attr hi_perf_threshold_attr = __ATTR(hi_perf_threshold,
+ 0644, show_hi_perf_threshold, store_hi_perf_threshold);
+
+
+static ssize_t show_low_power_threshold(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", low_power_threshold);
+}
+
+static ssize_t store_low_power_threshold(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ low_power_threshold = val;
+ return count;
+}
+
+static struct global_attr low_power_threshold_attr = __ATTR(low_power_threshold,
+ 0644, show_low_power_threshold, store_low_power_threshold);
+
+static ssize_t show_low_power_rate(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", low_power_rate);
+}
+
+static ssize_t store_low_power_rate(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ low_power_rate = val;
+ return count;
+}
+
+static struct global_attr low_power_rate_attr = __ATTR(low_power_rate,
+ 0644, show_low_power_rate, store_low_power_rate);
-static struct global_attr boostpulse =
- __ATTR(boostpulse, 0200, NULL, store_boostpulse);
static struct attribute *interactive_attributes[] = {
&hispeed_freq_attr.attr,
&go_hispeed_load_attr.attr,
-#ifdef CONFIG_HAS_EARLYSUSPEND
- &screen_off_differential_attr.attr,
-#endif
&above_hispeed_delay.attr,
&min_sample_time_attr.attr,
&timer_rate_attr.attr,
&input_boost.attr,
&boost.attr,
&boostpulse.attr,
+ &low_power_threshold_attr.attr,
+ &hi_perf_threshold_attr.attr,
+ &sampling_periods_attr.attr,
+ &low_power_rate_attr.attr,
NULL,
};
@@ -882,29 +1121,19 @@ static struct attribute_group interactive_attr_group = {
.name = "interactive",
};
-static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
- unsigned long val,
- void *data);
-static struct notifier_block cpufreq_interactive_idle_nb = {
- .notifier_call = cpufreq_interactive_idle_notifier,
-};
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
unsigned int event)
{
int rc;
- unsigned int j;
+ unsigned int j, i;
struct cpufreq_interactive_cpuinfo *pcpu;
struct cpufreq_frequency_table *freq_table;
- struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
switch (event) {
case CPUFREQ_GOV_START:
-
if (!cpu_online(policy->cpu))
return -EINVAL;
- pr_info("[%s] CPUFREQ_GOV_START\n", __func__);
-
freq_table =
cpufreq_frequency_get_table(policy->cpu);
@@ -922,6 +1151,14 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
pcpu->hispeed_validate_time =
pcpu->target_set_time;
pcpu->governor_enabled = 1;
+ pcpu->load_history = kmalloc(
+ (sizeof(unsigned int) * sampling_periods),
+ GFP_KERNEL);
+ if (!pcpu->load_history)
+ return -ENOMEM;
+ for (i = 0; i < sampling_periods; i++)
+ pcpu->load_history[i] = 0;
+ pcpu->history_load_index = 0;
smp_wmb();
}
@@ -935,14 +1172,6 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
if (atomic_inc_return(&active_count) > 1)
return 0;
- up_task = kthread_create(cpufreq_interactive_up_task, NULL,
- "kinteractiveup");
- if (IS_ERR(up_task))
- return PTR_ERR(up_task);
-
- sched_setscheduler_nocheck(up_task, SCHED_FIFO, &param);
- get_task_struct(up_task);
-
rc = sysfs_create_group(cpufreq_global_kobject,
&interactive_attr_group);
if (rc)
@@ -953,17 +1182,9 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
pr_warn("%s: failed to register input handler\n",
__func__);
- idle_notifier_register(&cpufreq_interactive_idle_nb);
-
-#ifdef CONFIG_HAS_EARLYSUSPEND
- register_early_suspend(&interactive_early_suspend);
-#endif
break;
case CPUFREQ_GOV_STOP:
-
- idle_notifier_unregister(&cpufreq_interactive_idle_nb);
-
for_each_cpu(j, policy->cpus) {
pcpu = &per_cpu(cpuinfo, j);
pcpu->governor_enabled = 0;
@@ -977,9 +1198,12 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
* that is trying to run.
*/
pcpu->idle_exit_time = 0;
+ kfree(pcpu->load_history);
}
flush_work(&freq_scale_down_work);
+ flush_work(&tune_work);
+
if (atomic_dec_return(&active_count) > 0)
return 0;
@@ -987,17 +1211,6 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
sysfs_remove_group(cpufreq_global_kobject,
&interactive_attr_group);
- kthread_stop(up_task);
- put_task_struct(up_task);
- up_task = NULL;
- pr_info("[%s] CPUFREQ_GOV_STOP\n", __func__);
-
-#ifdef CONFIG_HAS_EARLYSUSPEND
- register_early_suspend(&interactive_early_suspend);
- if (save_go_hispeed_load)
- go_hispeed_load = save_go_hispeed_load;
- save_go_hispeed_load = 0;
-#endif
break;
case CPUFREQ_GOV_LIMITS:
@@ -1028,27 +1241,50 @@ static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
return 0;
}
+static struct notifier_block cpufreq_interactive_idle_nb = {
+ .notifier_call = cpufreq_interactive_idle_notifier,
+};
static int __init cpufreq_interactive_init(void)
{
unsigned int i;
struct cpufreq_interactive_cpuinfo *pcpu;
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
timer_rate = DEFAULT_TIMER_RATE;
+#ifdef CONFIG_OMAP4_DPLL_CASCADING
+ default_timer_rate = DEFAULT_TIMER_RATE;
+#endif
+ sampling_periods = DEFAULT_SAMPLING_PERIODS;
+ hi_perf_threshold = DEFAULT_HI_PERF_THRESHOLD;
+ low_power_threshold = DEFAULT_LOW_POWER_THRESHOLD;
+ low_power_rate = DEFAULT_LOW_POWER_RATE;
+ cur_tune_value = DEFAULT_TUNE;
/* Initalize per-cpu timers */
for_each_possible_cpu(i) {
pcpu = &per_cpu(cpuinfo, i);
init_timer(&pcpu->cpu_timer);
pcpu->cpu_timer.function = cpufreq_interactive_timer;
pcpu->cpu_timer.data = i;
+ pcpu->cpu_tune_value = DEFAULT_TUNE;
}
+
+ up_task = kthread_create(cpufreq_interactive_up_task, NULL,
+ "kinteractiveup");
+ if (IS_ERR(up_task))
+ return PTR_ERR(up_task);
+
+ sched_setscheduler_nocheck(up_task, SCHED_FIFO, &param);
+ get_task_struct(up_task);
+
/* No rescuer thread, bind to CPU queuing the work for possibly
warm cache (probably doesn't matter much). */
down_wq = alloc_workqueue("knteractive_down", 0, 1);
+ tune_wq = alloc_workqueue("knteractive_tune", 0, 1);
- if (!down_wq)
+ if (!down_wq || !tune_wq)
goto err_freeuptask;
@@ -1056,14 +1292,20 @@ static int __init cpufreq_interactive_init(void)
INIT_WORK(&freq_scale_down_work,
cpufreq_interactive_freq_down);
+ INIT_WORK(&tune_work,
+ cpufreq_interactive_tune);
+
spin_lock_init(&up_cpumask_lock);
spin_lock_init(&down_cpumask_lock);
+ spin_lock_init(&tune_cpumask_lock);
mutex_init(&set_speed_lock);
+ idle_notifier_register(&cpufreq_interactive_idle_nb);
INIT_WORK(&inputopen.inputopen_work, cpufreq_interactive_input_open);
return cpufreq_register_governor(&cpufreq_gov_interactive);
err_freeuptask:
+ put_task_struct(up_task);
return -ENOMEM;
}
@@ -1076,11 +1318,10 @@ module_init(cpufreq_interactive_init);
static void __exit cpufreq_interactive_exit(void)
{
cpufreq_unregister_governor(&cpufreq_gov_interactive);
- if (up_task) {
- kthread_stop(up_task);
- put_task_struct(up_task);
- }
+ kthread_stop(up_task);
+ put_task_struct(up_task);
destroy_workqueue(down_wq);
+ destroy_workqueue(tune_wq);
}
module_exit(cpufreq_interactive_exit);
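
The change also adds four sysfs tunables (sampling_periods, hi_perf_threshold, low_power_threshold, low_power_rate) to the governor's global attribute group. A minimal userspace sketch for driving them, assuming the attribute group's usual /sys/devices/system/cpu/cpufreq/interactive location:

#include <stdio.h>

/* Assumed location of the interactive governor's global attributes. */
#define TUNABLE_DIR "/sys/devices/system/cpu/cpufreq/interactive/"

/* Hypothetical helper: write one tunable; returns 0 on success. */
static int set_tunable(const char *name, unsigned int val)
{
    char path[256];
    FILE *f;

    snprintf(path, sizeof(path), TUNABLE_DIR "%s", name);
    f = fopen(path, "w");
    if (!f) {
        perror(path);
        return -1;
    }
    fprintf(f, "%u\n", val);
    fclose(f);
    return 0;
}

int main(void)
{
    /* The patch defaults; adjust to taste. */
    set_tunable("sampling_periods", 10);
    set_tunable("hi_perf_threshold", 80);
    set_tunable("low_power_threshold", 35);
    set_tunable("low_power_rate", 10);
    return 0;
}

Note that store_sampling_periods resizes every CPU's history buffer under set_speed_lock, stopping and restarting the per-CPU timers with del_timer_sync()/mod_timer(), so the value can be changed safely while the governor is running.
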