Diffstat (limited to 'drivers/cpufreq/cpufreq.c')
-rw-r--r--  drivers/cpufreq/cpufreq.c  2236
1 file changed, 1383 insertions, 853 deletions
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index c63a4382374..6f024852c6f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2001 Russell King
* (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
+ * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
*
* Oct 2005 - Ashok Raj <ashok.raj@intel.com>
* Added handling for CPU hotplug
@@ -12,28 +13,23 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/notifier.h>
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/cpu.h>
-#include <linux/completion.h>
+#include <linux/init.h>
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
#include <linux/mutex.h>
-
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/tick.h>
#include <trace/events/power.h>
-#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
- "cpufreq-core", msg)
-
/**
* The "cpufreq driver" - the arch- or hardware-dependent low
* level driver of CPUFreq support, and its spinlock. This lock
@@ -41,67 +37,27 @@
*/
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
-#ifdef CONFIG_HOTPLUG_CPU
+static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
+static DEFINE_RWLOCK(cpufreq_driver_lock);
+DEFINE_MUTEX(cpufreq_governor_lock);
+static LIST_HEAD(cpufreq_policy_list);
+
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
-#endif
-static DEFINE_SPINLOCK(cpufreq_driver_lock);
-
-/*
- * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
- * all cpufreq/hotplug/workqueue/etc related lock issues.
- *
- * The rules for this semaphore:
- * - Any routine that wants to read from the policy structure will
- * do a down_read on this semaphore.
- * - Any routine that will write to the policy structure and/or may take away
- * the policy altogether (eg. CPU hotplug), will hold this lock in write
- * mode before doing so.
- *
- * Additional rules:
- * - All holders of the lock should check to make sure that the CPU they
- * are concerned with are online after they get the lock.
- * - Governor routines that can be called in cpufreq hotplug path should not
- * take this sem as top level hotplug notifier handler takes this.
- * - Lock should not be held across
- * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
- */
-static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
-static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
-
-#define lock_policy_rwsem(mode, cpu) \
-static int lock_policy_rwsem_##mode \
-(int cpu) \
-{ \
- int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
- BUG_ON(policy_cpu == -1); \
- down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
- if (unlikely(!cpu_online(cpu))) { \
- up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
- return -1; \
- } \
- \
- return 0; \
-}
-
-lock_policy_rwsem(read, cpu);
-lock_policy_rwsem(write, cpu);
+/* Flag to suspend/resume CPUFreq governors */
+static bool cpufreq_suspended;
-static void unlock_policy_rwsem_read(int cpu)
+static inline bool has_target(void)
{
- int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
- BUG_ON(policy_cpu == -1);
- up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
-}
-
-static void unlock_policy_rwsem_write(int cpu)
-{
- int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
- BUG_ON(policy_cpu == -1);
- up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
+ return cpufreq_driver->target_index || cpufreq_driver->target;
}
+/*
+ * rwsem to guarantee that cpufreq driver module doesn't unload during critical
+ * sections
+ */
+static DECLARE_RWSEM(cpufreq_rwsem);
/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
@@ -128,143 +84,158 @@ static int __init init_cpufreq_transition_notifier_list(void)
}
pure_initcall(init_cpufreq_transition_notifier_list);
+static int off __read_mostly;
+static int cpufreq_disabled(void)
+{
+ return off;
+}
+void disable_cpufreq(void)
+{
+ off = 1;
+}
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);
-struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
+bool have_governor_per_policy(void)
{
- struct cpufreq_policy *data;
- unsigned long flags;
-
- if (cpu >= nr_cpu_ids)
- goto err_out;
+ return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
+}
+EXPORT_SYMBOL_GPL(have_governor_per_policy);
- /* get the cpufreq driver */
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
+struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
+{
+ if (have_governor_per_policy())
+ return &policy->kobj;
+ else
+ return cpufreq_global_kobject;
+}
+EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
- if (!cpufreq_driver)
- goto err_out_unlock;
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
+{
+ u64 idle_time;
+ u64 cur_wall_time;
+ u64 busy_time;
- if (!try_module_get(cpufreq_driver->owner))
- goto err_out_unlock;
+ cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+ busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
- /* get the CPU */
- data = per_cpu(cpufreq_cpu_data, cpu);
+ idle_time = cur_wall_time - busy_time;
+ if (wall)
+ *wall = cputime_to_usecs(cur_wall_time);
- if (!data)
- goto err_out_put_module;
+ return cputime_to_usecs(idle_time);
+}
- if (!kobject_get(&data->kobj))
- goto err_out_put_module;
+u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
+{
+ u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- return data;
+ if (idle_time == -1ULL)
+ return get_cpu_idle_time_jiffy(cpu, wall);
+ else if (!io_busy)
+ idle_time += get_cpu_iowait_time_us(cpu, wall);
-err_out_put_module:
- module_put(cpufreq_driver->owner);
-err_out_unlock:
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-err_out:
- return NULL;
+ return idle_time;
}
-EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
-
+EXPORT_SYMBOL_GPL(get_cpu_idle_time);
-void cpufreq_cpu_put(struct cpufreq_policy *data)
+/*
+ * This is a generic cpufreq init() routine which can be used by cpufreq
+ * drivers of SMP systems. It will do the following:
+ * - validate & show the freq table passed
+ * - set the policy's transition latency
+ * - fill policy->cpus with all possible CPUs
+ */
+int cpufreq_generic_init(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table,
+ unsigned int transition_latency)
{
- kobject_put(&data->kobj);
- module_put(cpufreq_driver->owner);
-}
-EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
-
+ int ret;
-/*********************************************************************
- * UNIFIED DEBUG HELPERS *
- *********************************************************************/
-#ifdef CONFIG_CPU_FREQ_DEBUG
+ ret = cpufreq_table_validate_and_show(policy, table);
+ if (ret) {
+ pr_err("%s: invalid frequency table: %d\n", __func__, ret);
+ return ret;
+ }
-/* what part(s) of the CPUfreq subsystem are debugged? */
-static unsigned int debug;
+ policy->cpuinfo.transition_latency = transition_latency;
-/* is the debug output ratelimit'ed using printk_ratelimit? User can
- * set or modify this value.
- */
-static unsigned int debug_ratelimit = 1;
+ /*
+	 * The driver only supports the SMP configuration where all processors
+	 * share the same clock and voltage.
+ */
+ cpumask_setall(policy->cpus);
-/* is the printk_ratelimit'ing enabled? It's enabled after a successful
- * loading of a cpufreq driver, temporarily disabled when a new policy
- * is set, and disabled upon cpufreq driver removal
- */
-static unsigned int disable_ratelimit = 1;
-static DEFINE_SPINLOCK(disable_ratelimit_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_generic_init);
-static void cpufreq_debug_enable_ratelimit(void)
+unsigned int cpufreq_generic_get(unsigned int cpu)
{
- unsigned long flags;
+ struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
- spin_lock_irqsave(&disable_ratelimit_lock, flags);
- if (disable_ratelimit)
- disable_ratelimit--;
- spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
+ if (!policy || IS_ERR(policy->clk)) {
+ pr_err("%s: No %s associated to cpu: %d\n",
+ __func__, policy ? "clk" : "policy", cpu);
+ return 0;
+ }
+
+ return clk_get_rate(policy->clk) / 1000;
}
+EXPORT_SYMBOL_GPL(cpufreq_generic_get);
-static void cpufreq_debug_disable_ratelimit(void)
+/* Only for cpufreq core internal use */
+struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
- unsigned long flags;
-
- spin_lock_irqsave(&disable_ratelimit_lock, flags);
- disable_ratelimit++;
- spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
+ return per_cpu(cpufreq_cpu_data, cpu);
}
-void cpufreq_debug_printk(unsigned int type, const char *prefix,
- const char *fmt, ...)
+struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
- char s[256];
- va_list args;
- unsigned int len;
+ struct cpufreq_policy *policy = NULL;
unsigned long flags;
- WARN_ON(!prefix);
- if (type & debug) {
- spin_lock_irqsave(&disable_ratelimit_lock, flags);
- if (!disable_ratelimit && debug_ratelimit
- && !printk_ratelimit()) {
- spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
- return;
- }
- spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
-
- len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix);
+ if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
+ return NULL;
- va_start(args, fmt);
- len += vsnprintf(&s[len], (256 - len), fmt, args);
- va_end(args);
+ if (!down_read_trylock(&cpufreq_rwsem))
+ return NULL;
- printk(s);
+ /* get the cpufreq driver */
+ read_lock_irqsave(&cpufreq_driver_lock, flags);
- WARN_ON(len < 5);
+ if (cpufreq_driver) {
+ /* get the CPU */
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+ if (policy)
+ kobject_get(&policy->kobj);
}
-}
-EXPORT_SYMBOL(cpufreq_debug_printk);
-
-module_param(debug, uint, 0644);
-MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core,"
- " 2 to debug drivers, and 4 to debug governors.");
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-module_param(debug_ratelimit, uint, 0644);
-MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:"
- " set to 0 to disable ratelimiting.");
-
-#else /* !CONFIG_CPU_FREQ_DEBUG */
+ if (!policy)
+ up_read(&cpufreq_rwsem);
-static inline void cpufreq_debug_enable_ratelimit(void) { return; }
-static inline void cpufreq_debug_disable_ratelimit(void) { return; }
+ return policy;
+}
+EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
-#endif /* CONFIG_CPU_FREQ_DEBUG */
+void cpufreq_cpu_put(struct cpufreq_policy *policy)
+{
+ if (cpufreq_disabled())
+ return;
+ kobject_put(&policy->kobj);
+ up_read(&cpufreq_rwsem);
+}
+EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
@@ -280,7 +251,7 @@ static inline void cpufreq_debug_disable_ratelimit(void) { return; }
*/
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
-static unsigned int l_p_j_ref_freq;
+static unsigned int l_p_j_ref_freq;
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
@@ -290,16 +261,14 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
if (!l_p_j_ref_freq) {
l_p_j_ref = loops_per_jiffy;
l_p_j_ref_freq = ci->old;
- dprintk("saving %lu as reference value for loops_per_jiffy; "
- "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
+ pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
+ l_p_j_ref, l_p_j_ref_freq);
}
- if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
- (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
- (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
+ if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
ci->new);
- dprintk("scaling loops_per_jiffy to %lu "
- "for frequency %u kHz\n", loops_per_jiffy, ci->new);
+ pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
+ loops_per_jiffy, ci->new);
}
}
#else
@@ -309,26 +278,18 @@ static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
}
#endif
-
-/**
- * cpufreq_notify_transition - call notifier chain and adjust_jiffies
- * on frequency transition.
- *
- * This function calls the transition notifiers and the "adjust_jiffies"
- * function. It is called twice on all CPU frequency changes that have
- * external effects.
- */
-void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
+static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs, unsigned int state)
{
- struct cpufreq_policy *policy;
-
BUG_ON(irqs_disabled());
+ if (cpufreq_disabled())
+ return;
+
freqs->flags = cpufreq_driver->flags;
- dprintk("notification %u of frequency transition to %u kHz\n",
- state, freqs->new);
+ pr_debug("notification %u of frequency transition to %u kHz\n",
+ state, freqs->new);
- policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
switch (state) {
case CPUFREQ_PRECHANGE:
@@ -339,9 +300,8 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
if ((policy) && (policy->cpu == freqs->cpu) &&
(policy->cur) && (policy->cur != freqs->old)) {
- dprintk("Warning: CPU frequency is"
- " %u, cpufreq assumed %u kHz.\n",
- freqs->old, policy->cur);
+ pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
+ freqs->old, policy->cur);
freqs->old = policy->cur;
}
}
@@ -352,9 +312,9 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
case CPUFREQ_POSTCHANGE:
adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
- dprintk("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
- (unsigned long)freqs->cpu);
- trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu);
+ pr_debug("FREQ: %lu - CPU: %lu\n",
+ (unsigned long)freqs->new, (unsigned long)freqs->cpu);
+ trace_cpu_frequency(freqs->new, freqs->cpu);
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
CPUFREQ_POSTCHANGE, freqs);
if (likely(policy) && likely(policy->cpu == freqs->cpu))
@@ -362,13 +322,115 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
break;
}
}
-EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
+/**
+ * cpufreq_notify_transition - call notifier chain and adjust_jiffies
+ * on frequency transition.
+ *
+ * This function calls the transition notifiers and the "adjust_jiffies"
+ * function. It is called twice on all CPU frequency changes that have
+ * external effects.
+ */
+static void cpufreq_notify_transition(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs, unsigned int state)
+{
+ for_each_cpu(freqs->cpu, policy->cpus)
+ __cpufreq_notify_transition(policy, freqs, state);
+}
+
+/* Do post notifications when there are chances that transition has failed */
+static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs, int transition_failed)
+{
+ cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
+ if (!transition_failed)
+ return;
+
+ swap(freqs->old, freqs->new);
+ cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
+}
+
+void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs)
+{
+
+ /*
+ * Catch double invocations of _begin() which lead to self-deadlock.
+ * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
+ * doesn't invoke _begin() on their behalf, and hence the chances of
+ * double invocations are very low. Moreover, there are scenarios
+ * where these checks can emit false-positive warnings in these
+ * drivers; so we avoid that by skipping them altogether.
+ */
+ WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
+ && current == policy->transition_task);
+
+wait:
+ wait_event(policy->transition_wait, !policy->transition_ongoing);
+
+ spin_lock(&policy->transition_lock);
+
+ if (unlikely(policy->transition_ongoing)) {
+ spin_unlock(&policy->transition_lock);
+ goto wait;
+ }
+
+ policy->transition_ongoing = true;
+ policy->transition_task = current;
+
+ spin_unlock(&policy->transition_lock);
+
+ cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
+}
+EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
+
+void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs, int transition_failed)
+{
+ if (unlikely(WARN_ON(!policy->transition_ongoing)))
+ return;
+
+ cpufreq_notify_post_transition(policy, freqs, transition_failed);
+
+ policy->transition_ongoing = false;
+ policy->transition_task = NULL;
+
+ wake_up(&policy->transition_wait);
+}
+EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
/*********************************************************************
* SYSFS INTERFACE *
*********************************************************************/
+static ssize_t show_boost(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
+}
+
+static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret, enable;
+
+ ret = sscanf(buf, "%d", &enable);
+ if (ret != 1 || enable < 0 || enable > 1)
+ return -EINVAL;
+
+ if (cpufreq_boost_trigger_state(enable)) {
+ pr_err("%s: Cannot %s BOOST!\n",
+ __func__, enable ? "enable" : "disable");
+ return -EINVAL;
+ }
+
+ pr_debug("%s: cpufreq BOOST %s\n",
+ __func__, enable ? "enabled" : "disabled");
+
+ return count;
+}
+define_one_global_rw(boost);
static struct cpufreq_governor *__find_governor(const char *str_governor)
{
@@ -401,7 +463,7 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
*policy = CPUFREQ_POLICY_POWERSAVE;
err = 0;
}
- } else if (cpufreq_driver->target) {
+ } else if (has_target()) {
struct cpufreq_governor *t;
mutex_lock(&cpufreq_governor_mutex);
@@ -409,21 +471,14 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
t = __find_governor(str_governor);
if (t == NULL) {
- char *name = kasprintf(GFP_KERNEL, "cpufreq_%s",
- str_governor);
+ int ret;
- if (name) {
- int ret;
-
- mutex_unlock(&cpufreq_governor_mutex);
- ret = request_module("%s", name);
- mutex_lock(&cpufreq_governor_mutex);
-
- if (ret == 0)
- t = __find_governor(str_governor);
- }
+ mutex_unlock(&cpufreq_governor_mutex);
+ ret = request_module("cpufreq_%s", str_governor);
+ mutex_lock(&cpufreq_governor_mutex);
- kfree(name);
+ if (ret == 0)
+ t = __find_governor(str_governor);
}
if (t != NULL) {
@@ -437,7 +492,6 @@ out:
return err;
}
-
/**
* cpufreq_per_cpu_attr_read() / show_##file_name() -
* print out cpufreq information
@@ -460,8 +514,8 @@ show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
-static int __cpufreq_set_policy(struct cpufreq_policy *data,
- struct cpufreq_policy *policy);
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
+ struct cpufreq_policy *new_policy);
/**
* cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
@@ -470,7 +524,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
static ssize_t store_##file_name \
(struct cpufreq_policy *policy, const char *buf, size_t count) \
{ \
- unsigned int ret = -EINVAL; \
+ int ret; \
struct cpufreq_policy new_policy; \
\
ret = cpufreq_get_policy(&new_policy, policy->cpu); \
@@ -481,7 +535,7 @@ static ssize_t store_##file_name \
if (ret != 1) \
return -EINVAL; \
\
- ret = __cpufreq_set_policy(policy, &new_policy); \
+ ret = cpufreq_set_policy(policy, &new_policy); \
policy->user_policy.object = policy->object; \
\
return ret ? ret : count; \
@@ -502,7 +556,6 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
return sprintf(buf, "%u\n", cur_freq);
}
-
/**
* show_scaling_governor - show the current policy for the specified CPU
*/
@@ -513,19 +566,18 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
return sprintf(buf, "performance\n");
else if (policy->governor)
- return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
+ return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
policy->governor->name);
return -EINVAL;
}
-
/**
* store_scaling_governor - store policy for the specified CPU
*/
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
- unsigned int ret = -EINVAL;
+ int ret;
char str_governor[16];
struct cpufreq_policy new_policy;
@@ -541,9 +593,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
&new_policy.governor))
return -EINVAL;
- /* Do not use cpufreq_set_policy here or the user_policy.max
- will be wrongly overridden */
- ret = __cpufreq_set_policy(policy, &new_policy);
+ ret = cpufreq_set_policy(policy, &new_policy);
policy->user_policy.policy = policy->policy;
policy->user_policy.governor = policy->governor;
@@ -559,7 +609,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
*/
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
- return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
+ return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
/**
@@ -571,7 +621,7 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
ssize_t i = 0;
struct cpufreq_governor *t;
- if (!cpufreq_driver->target) {
+ if (!has_target()) {
i += sprintf(buf, "performance powersave");
goto out;
}
@@ -580,14 +630,14 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
- (CPUFREQ_NAME_LEN + 2)))
goto out;
- i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
+ i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
}
out:
i += sprintf(&buf[i], "\n");
return i;
}
-static ssize_t show_cpus(const struct cpumask *mask, char *buf)
+ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
ssize_t i = 0;
unsigned int cpu;
@@ -602,6 +652,7 @@ static ssize_t show_cpus(const struct cpumask *mask, char *buf)
i += sprintf(&buf[i], "\n");
return i;
}
+EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
/**
* show_related_cpus - show the CPUs affected by each transition even if
@@ -609,9 +660,7 @@ static ssize_t show_cpus(const struct cpumask *mask, char *buf)
*/
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
- if (cpumask_empty(policy->related_cpus))
- return show_cpus(policy->cpus, buf);
- return show_cpus(policy->related_cpus, buf);
+ return cpufreq_show_cpus(policy->related_cpus, buf);
}
/**
@@ -619,7 +668,7 @@ static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
*/
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
- return show_cpus(policy->cpus, buf);
+ return cpufreq_show_cpus(policy->cpus, buf);
}
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
@@ -649,7 +698,7 @@ static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
}
/**
- * show_scaling_driver - show the current cpufreq HW/BIOS limitation
+ * show_bios_limit - show the current cpufreq HW/BIOS limitation
*/
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
@@ -693,9 +742,6 @@ static struct attribute *default_attrs[] = {
NULL
};
-struct kobject *cpufreq_global_kobject;
-EXPORT_SYMBOL(cpufreq_global_kobject);
-
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
@@ -703,23 +749,21 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
- ssize_t ret = -EINVAL;
- policy = cpufreq_cpu_get(policy->cpu);
- if (!policy)
- goto no_policy;
+ ssize_t ret;
- if (lock_policy_rwsem_read(policy->cpu) < 0)
- goto fail;
+ if (!down_read_trylock(&cpufreq_rwsem))
+ return -EINVAL;
+
+ down_read(&policy->rwsem);
if (fattr->show)
ret = fattr->show(policy, buf);
else
ret = -EIO;
- unlock_policy_rwsem_read(policy->cpu);
-fail:
- cpufreq_cpu_put(policy);
-no_policy:
+ up_read(&policy->rwsem);
+ up_read(&cpufreq_rwsem);
+
return ret;
}
@@ -729,29 +773,35 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
- policy = cpufreq_cpu_get(policy->cpu);
- if (!policy)
- goto no_policy;
- if (lock_policy_rwsem_write(policy->cpu) < 0)
- goto fail;
+ get_online_cpus();
+
+ if (!cpu_online(policy->cpu))
+ goto unlock;
+
+ if (!down_read_trylock(&cpufreq_rwsem))
+ goto unlock;
+
+ down_write(&policy->rwsem);
if (fattr->store)
ret = fattr->store(policy, buf, count);
else
ret = -EIO;
- unlock_policy_rwsem_write(policy->cpu);
-fail:
- cpufreq_cpu_put(policy);
-no_policy:
+ up_write(&policy->rwsem);
+
+ up_read(&cpufreq_rwsem);
+unlock:
+ put_online_cpus();
+
return ret;
}
static void cpufreq_sysfs_release(struct kobject *kobj)
{
struct cpufreq_policy *policy = to_policy(kobj);
- dprintk("last reference is dropped\n");
+ pr_debug("last reference is dropped\n");
complete(&policy->kobj_unregister);
}
@@ -766,129 +816,80 @@ static struct kobj_type ktype_cpufreq = {
.release = cpufreq_sysfs_release,
};
-/*
- * Returns:
- * Negative: Failure
- * 0: Success
- * Positive: When we have a managed CPU and the sysfs got symlinked
- */
-static int cpufreq_add_dev_policy(unsigned int cpu,
- struct cpufreq_policy *policy,
- struct sys_device *sys_dev)
-{
- int ret = 0;
-#ifdef CONFIG_SMP
- unsigned long flags;
- unsigned int j;
-#ifdef CONFIG_HOTPLUG_CPU
- struct cpufreq_governor *gov;
+struct kobject *cpufreq_global_kobject;
+EXPORT_SYMBOL(cpufreq_global_kobject);
- gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
- if (gov) {
- policy->governor = gov;
- dprintk("Restoring governor %s for cpu %d\n",
- policy->governor->name, cpu);
- }
-#endif
+static int cpufreq_global_kobject_usage;
- for_each_cpu(j, policy->cpus) {
- struct cpufreq_policy *managed_policy;
+int cpufreq_get_global_kobject(void)
+{
+ if (!cpufreq_global_kobject_usage++)
+ return kobject_add(cpufreq_global_kobject,
+ &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
- if (cpu == j)
- continue;
+ return 0;
+}
+EXPORT_SYMBOL(cpufreq_get_global_kobject);
- /* Check for existing affected CPUs.
- * They may not be aware of it due to CPU Hotplug.
- * cpufreq_cpu_put is called when the device is removed
- * in __cpufreq_remove_dev()
- */
- managed_policy = cpufreq_cpu_get(j);
- if (unlikely(managed_policy)) {
-
- /* Set proper policy_cpu */
- unlock_policy_rwsem_write(cpu);
- per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;
-
- if (lock_policy_rwsem_write(cpu) < 0) {
- /* Should not go through policy unlock path */
- if (cpufreq_driver->exit)
- cpufreq_driver->exit(policy);
- cpufreq_cpu_put(managed_policy);
- return -EBUSY;
- }
+void cpufreq_put_global_kobject(void)
+{
+ if (!--cpufreq_global_kobject_usage)
+ kobject_del(cpufreq_global_kobject);
+}
+EXPORT_SYMBOL(cpufreq_put_global_kobject);
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
- cpumask_copy(managed_policy->cpus, policy->cpus);
- per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
- dprintk("CPU already managed, adding link\n");
- ret = sysfs_create_link(&sys_dev->kobj,
- &managed_policy->kobj,
- "cpufreq");
- if (ret)
- cpufreq_cpu_put(managed_policy);
- /*
- * Success. We only needed to be added to the mask.
- * Call driver->exit() because only the cpu parent of
- * the kobj needed to call init().
- */
- if (cpufreq_driver->exit)
- cpufreq_driver->exit(policy);
+int cpufreq_sysfs_create_file(const struct attribute *attr)
+{
+ int ret = cpufreq_get_global_kobject();
- if (!ret)
- return 1;
- else
- return ret;
- }
+ if (!ret) {
+ ret = sysfs_create_file(cpufreq_global_kobject, attr);
+ if (ret)
+ cpufreq_put_global_kobject();
}
-#endif
+
return ret;
}
+EXPORT_SYMBOL(cpufreq_sysfs_create_file);
+void cpufreq_sysfs_remove_file(const struct attribute *attr)
+{
+ sysfs_remove_file(cpufreq_global_kobject, attr);
+ cpufreq_put_global_kobject();
+}
+EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
/* symlink affected CPUs */
-static int cpufreq_add_dev_symlink(unsigned int cpu,
- struct cpufreq_policy *policy)
+static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
unsigned int j;
int ret = 0;
for_each_cpu(j, policy->cpus) {
- struct cpufreq_policy *managed_policy;
- struct sys_device *cpu_sys_dev;
+ struct device *cpu_dev;
- if (j == cpu)
- continue;
- if (!cpu_online(j))
+ if (j == policy->cpu)
continue;
- dprintk("CPU %u already managed, adding link\n", j);
- managed_policy = cpufreq_cpu_get(cpu);
- cpu_sys_dev = get_cpu_sysdev(j);
- ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
+ pr_debug("Adding link for CPU: %u\n", j);
+ cpu_dev = get_cpu_device(j);
+ ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
"cpufreq");
- if (ret) {
- cpufreq_cpu_put(managed_policy);
- return ret;
- }
+ if (ret)
+ break;
}
return ret;
}
-static int cpufreq_add_dev_interface(unsigned int cpu,
- struct cpufreq_policy *policy,
- struct sys_device *sys_dev)
+static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
+ struct device *dev)
{
- struct cpufreq_policy new_policy;
struct freq_attr **drv_attr;
- unsigned long flags;
int ret = 0;
- unsigned int j;
/* prepare interface data */
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
- &sys_dev->kobj, "cpufreq");
+ &dev->kobj, "cpufreq");
if (ret)
return ret;
@@ -905,7 +906,7 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
if (ret)
goto err_out_kobj_put;
}
- if (cpufreq_driver->target) {
+ if (has_target()) {
ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
if (ret)
goto err_out_kobj_put;
@@ -916,67 +917,196 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
goto err_out_kobj_put;
}
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
- for_each_cpu(j, policy->cpus) {
- if (!cpu_online(j))
- continue;
- per_cpu(cpufreq_cpu_data, j) = policy;
- per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
- }
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
- ret = cpufreq_add_dev_symlink(cpu, policy);
+ ret = cpufreq_add_dev_symlink(policy);
if (ret)
goto err_out_kobj_put;
- memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
- /* assure that the starting sequence is run in __cpufreq_set_policy */
- policy->governor = NULL;
+ return ret;
- /* set default policy */
- ret = __cpufreq_set_policy(policy, &new_policy);
- policy->user_policy.policy = policy->policy;
- policy->user_policy.governor = policy->governor;
+err_out_kobj_put:
+ kobject_put(&policy->kobj);
+ wait_for_completion(&policy->kobj_unregister);
+ return ret;
+}
+
+static void cpufreq_init_policy(struct cpufreq_policy *policy)
+{
+ struct cpufreq_governor *gov = NULL;
+ struct cpufreq_policy new_policy;
+ int ret = 0;
+
+ memcpy(&new_policy, policy, sizeof(*policy));
+
+ /* Update governor of new_policy to the governor used before hotplug */
+ gov = __find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
+ if (gov)
+ pr_debug("Restoring governor %s for cpu %d\n",
+ policy->governor->name, policy->cpu);
+ else
+ gov = CPUFREQ_DEFAULT_GOVERNOR;
+
+ new_policy.governor = gov;
+	/* Use the default policy if it's valid. */
+ if (cpufreq_driver->setpolicy)
+ cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
+
+ /* set default policy */
+ ret = cpufreq_set_policy(policy, &new_policy);
if (ret) {
- dprintk("setting policy failed\n");
+ pr_debug("setting policy failed\n");
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
}
- return ret;
+}
-err_out_kobj_put:
- kobject_put(&policy->kobj);
- wait_for_completion(&policy->kobj_unregister);
- return ret;
+#ifdef CONFIG_HOTPLUG_CPU
+static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
+ unsigned int cpu, struct device *dev)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ if (has_target()) {
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ if (ret) {
+ pr_err("%s: Failed to stop governor\n", __func__);
+ return ret;
+ }
+ }
+
+ down_write(&policy->rwsem);
+
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+
+ cpumask_set_cpu(cpu, policy->cpus);
+ per_cpu(cpufreq_cpu_data, cpu) = policy;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ up_write(&policy->rwsem);
+
+ if (has_target()) {
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+ if (!ret)
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+
+ if (ret) {
+ pr_err("%s: Failed to start governor\n", __func__);
+ return ret;
+ }
+ }
+
+ return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
}
+#endif
+static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
+{
+ struct cpufreq_policy *policy;
+ unsigned long flags;
-/**
- * cpufreq_add_dev - add a CPU device
- *
- * Adds the cpufreq interface for a CPU device.
- *
- * The Oracle says: try running cpufreq registration/unregistration concurrently
- * with with cpu hotplugging and all hell will break loose. Tried to clean this
- * mess up, but more thorough testing is needed. - Mathieu
- */
-static int cpufreq_add_dev(struct sys_device *sys_dev)
+ read_lock_irqsave(&cpufreq_driver_lock, flags);
+
+ policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
+
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ policy->governor = NULL;
+
+ return policy;
+}
+
+static struct cpufreq_policy *cpufreq_policy_alloc(void)
{
- unsigned int cpu = sys_dev->id;
- int ret = 0, found = 0;
+ struct cpufreq_policy *policy;
+
+ policy = kzalloc(sizeof(*policy), GFP_KERNEL);
+ if (!policy)
+ return NULL;
+
+ if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
+ goto err_free_policy;
+
+ if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
+ goto err_free_cpumask;
+
+ INIT_LIST_HEAD(&policy->policy_list);
+ init_rwsem(&policy->rwsem);
+ spin_lock_init(&policy->transition_lock);
+ init_waitqueue_head(&policy->transition_wait);
+
+ return policy;
+
+err_free_cpumask:
+ free_cpumask_var(policy->cpus);
+err_free_policy:
+ kfree(policy);
+
+ return NULL;
+}
+
+static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
+{
+ struct kobject *kobj;
+ struct completion *cmp;
+
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_REMOVE_POLICY, policy);
+
+ down_read(&policy->rwsem);
+ kobj = &policy->kobj;
+ cmp = &policy->kobj_unregister;
+ up_read(&policy->rwsem);
+ kobject_put(kobj);
+
+ /*
+ * We need to make sure that the underlying kobj is
+ * actually not referenced anymore by anybody before we
+ * proceed with unloading.
+ */
+ pr_debug("waiting for dropping of refcount\n");
+ wait_for_completion(cmp);
+ pr_debug("wait complete\n");
+}
+
+static void cpufreq_policy_free(struct cpufreq_policy *policy)
+{
+ free_cpumask_var(policy->related_cpus);
+ free_cpumask_var(policy->cpus);
+ kfree(policy);
+}
+
+static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
+{
+ if (WARN_ON(cpu == policy->cpu))
+ return;
+
+ down_write(&policy->rwsem);
+
+ policy->last_cpu = policy->cpu;
+ policy->cpu = cpu;
+
+ up_write(&policy->rwsem);
+
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_UPDATE_POLICY_CPU, policy);
+}
+
+static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+{
+ unsigned int j, cpu = dev->id;
+ int ret = -ENOMEM;
struct cpufreq_policy *policy;
unsigned long flags;
- unsigned int j;
+ bool recover_policy = cpufreq_suspended;
#ifdef CONFIG_HOTPLUG_CPU
- int sibling;
+ struct cpufreq_policy *tpolicy;
#endif
if (cpu_is_offline(cpu))
return 0;
- cpufreq_debug_disable_ratelimit();
- dprintk("adding CPU %u\n", cpu);
+ pr_debug("adding CPU %u\n", cpu);
#ifdef CONFIG_SMP
/* check whether a different CPU already registered this
@@ -984,265 +1114,389 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
policy = cpufreq_cpu_get(cpu);
if (unlikely(policy)) {
cpufreq_cpu_put(policy);
- cpufreq_debug_enable_ratelimit();
return 0;
}
#endif
- if (!try_module_get(cpufreq_driver->owner)) {
- ret = -EINVAL;
- goto module_out;
- }
+ if (!down_read_trylock(&cpufreq_rwsem))
+ return 0;
- ret = -ENOMEM;
- policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
- if (!policy)
- goto nomem_out;
+#ifdef CONFIG_HOTPLUG_CPU
+ /* Check if this cpu was hot-unplugged earlier and has siblings */
+ read_lock_irqsave(&cpufreq_driver_lock, flags);
+ list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
+ if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
+ up_read(&cpufreq_rwsem);
+ return ret;
+ }
+ }
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+#endif
- if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
- goto err_free_policy;
+ /*
+ * Restore the saved policy when doing light-weight init and fall back
+ * to the full init if that fails.
+ */
+ policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
+ if (!policy) {
+ recover_policy = false;
+ policy = cpufreq_policy_alloc();
+ if (!policy)
+ goto nomem_out;
+ }
- if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
- goto err_free_cpumask;
+ /*
+ * In the resume path, since we restore a saved policy, the assignment
+ * to policy->cpu is like an update of the existing policy, rather than
+ * the creation of a brand new one. So we need to perform this update
+ * by invoking update_policy_cpu().
+ */
+ if (recover_policy && cpu != policy->cpu) {
+ update_policy_cpu(policy, cpu);
+ WARN_ON(kobject_move(&policy->kobj, &dev->kobj));
+ } else {
+ policy->cpu = cpu;
+ }
- policy->cpu = cpu;
cpumask_copy(policy->cpus, cpumask_of(cpu));
- /* Initially set CPU itself as the policy_cpu */
- per_cpu(cpufreq_policy_cpu, cpu) = cpu;
- ret = (lock_policy_rwsem_write(cpu) < 0);
- WARN_ON(ret);
-
init_completion(&policy->kobj_unregister);
INIT_WORK(&policy->update, handle_update);
- /* Set governor before ->init, so that driver could check it */
-#ifdef CONFIG_HOTPLUG_CPU
- for_each_online_cpu(sibling) {
- struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
- if (cp && cp->governor &&
- (cpumask_test_cpu(cpu, cp->related_cpus))) {
- policy->governor = cp->governor;
- found = 1;
- break;
- }
- }
-#endif
- if (!found)
- policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
/* call driver. From then on the cpufreq must be able
* to accept all calls to ->verify and ->setpolicy for this CPU
*/
ret = cpufreq_driver->init(policy);
if (ret) {
- dprintk("initialization failed\n");
- goto err_unlock_policy;
+ pr_debug("initialization failed\n");
+ goto err_set_policy_cpu;
+ }
+
+	/* related cpus should at least have policy->cpus */
+ cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+
+ /*
+	 * affected cpus must always be the ones that are online. We aren't
+ * managing offline cpus here.
+ */
+ cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
+
+ if (!recover_policy) {
+ policy->user_policy.min = policy->min;
+ policy->user_policy.max = policy->max;
+ }
+
+ down_write(&policy->rwsem);
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ for_each_cpu(j, policy->cpus)
+ per_cpu(cpufreq_cpu_data, j) = policy;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
+ policy->cur = cpufreq_driver->get(policy->cpu);
+ if (!policy->cur) {
+ pr_err("%s: ->get() failed\n", __func__);
+ goto err_get_freq;
+ }
+ }
+
+ /*
+	 * Sometimes boot loaders set the CPU frequency to a value outside of
+	 * the frequency table present with the cpufreq core. In such cases the
+	 * CPU might be unstable if it has to run at that frequency for a long
+	 * time, so it's better to set it to a frequency which is listed in the
+	 * freq-table. This also makes cpufreq stats inconsistent, as
+	 * cpufreq-stats would fail to register because the current frequency
+	 * of the CPU isn't found in the freq-table.
+ *
+	 * Because we don't want this change to affect the boot process badly, we go
+ * for the next freq which is >= policy->cur ('cur' must be set by now,
+ * otherwise we will end up setting freq to lowest of the table as 'cur'
+ * is initialized to zero).
+ *
+ * We are passing target-freq as "policy->cur - 1" otherwise
+ * __cpufreq_driver_target() would simply fail, as policy->cur will be
+ * equal to target-freq.
+ */
+ if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
+ && has_target()) {
+ /* Are we running at unknown frequency ? */
+ ret = cpufreq_frequency_table_get_index(policy, policy->cur);
+ if (ret == -EINVAL) {
+ /* Warn user and fix it */
+ pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
+ __func__, policy->cpu, policy->cur);
+ ret = __cpufreq_driver_target(policy, policy->cur - 1,
+ CPUFREQ_RELATION_L);
+
+ /*
+ * Reaching here after boot in a few seconds may not
+			 * mean that the system will remain stable at "unknown"
+			 * frequency for a longer duration. Hence, a BUG_ON().
+ */
+ BUG_ON(ret);
+ pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
+ __func__, policy->cpu, policy->cur);
+ }
}
- policy->user_policy.min = policy->min;
- policy->user_policy.max = policy->max;
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_START, policy);
- ret = cpufreq_add_dev_policy(cpu, policy, sys_dev);
- if (ret) {
- if (ret > 0)
- /* This is a managed cpu, symlink created,
- exit with 0 */
- ret = 0;
- goto err_unlock_policy;
+ if (!recover_policy) {
+ ret = cpufreq_add_dev_interface(policy, dev);
+ if (ret)
+ goto err_out_unregister;
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_CREATE_POLICY, policy);
}
- ret = cpufreq_add_dev_interface(cpu, policy, sys_dev);
- if (ret)
- goto err_out_unregister;
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ list_add(&policy->policy_list, &cpufreq_policy_list);
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ cpufreq_init_policy(policy);
- unlock_policy_rwsem_write(cpu);
+ if (!recover_policy) {
+ policy->user_policy.policy = policy->policy;
+ policy->user_policy.governor = policy->governor;
+ }
+ up_write(&policy->rwsem);
kobject_uevent(&policy->kobj, KOBJ_ADD);
- module_put(cpufreq_driver->owner);
- dprintk("initialization complete\n");
- cpufreq_debug_enable_ratelimit();
+ up_read(&cpufreq_rwsem);
- return 0;
+ pr_debug("initialization complete\n");
+ return 0;
err_out_unregister:
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
+err_get_freq:
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_cpu(j, policy->cpus)
per_cpu(cpufreq_cpu_data, j) = NULL;
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- kobject_put(&policy->kobj);
- wait_for_completion(&policy->kobj_unregister);
+ if (cpufreq_driver->exit)
+ cpufreq_driver->exit(policy);
+err_set_policy_cpu:
+ if (recover_policy) {
+ /* Do not leave stale fallback data behind. */
+ per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
+ cpufreq_policy_put_kobj(policy);
+ }
+ cpufreq_policy_free(policy);
-err_unlock_policy:
- unlock_policy_rwsem_write(cpu);
- free_cpumask_var(policy->related_cpus);
-err_free_cpumask:
- free_cpumask_var(policy->cpus);
-err_free_policy:
- kfree(policy);
nomem_out:
- module_put(cpufreq_driver->owner);
-module_out:
- cpufreq_debug_enable_ratelimit();
+ up_read(&cpufreq_rwsem);
+
return ret;
}
-
/**
- * __cpufreq_remove_dev - remove a CPU device
+ * cpufreq_add_dev - add a CPU device
*
- * Removes the cpufreq interface for a CPU device.
- * Caller should already have policy_rwsem in write mode for this CPU.
- * This routine frees the rwsem before returning.
+ * Adds the cpufreq interface for a CPU device.
+ *
+ * The Oracle says: try running cpufreq registration/unregistration concurrently
+ * with cpu hotplugging and all hell will break loose. Tried to clean this
+ * mess up, but more thorough testing is needed. - Mathieu
*/
-static int __cpufreq_remove_dev(struct sys_device *sys_dev)
+static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
- unsigned int cpu = sys_dev->id;
- unsigned long flags;
- struct cpufreq_policy *data;
- struct kobject *kobj;
- struct completion *cmp;
-#ifdef CONFIG_SMP
- struct sys_device *cpu_sys_dev;
- unsigned int j;
-#endif
+ return __cpufreq_add_dev(dev, sif);
+}
- cpufreq_debug_disable_ratelimit();
- dprintk("unregistering CPU %u\n", cpu);
+static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
+ unsigned int old_cpu)
+{
+ struct device *cpu_dev;
+ int ret;
+
+ /* first sibling now owns the new sysfs dir */
+ cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
+
+ sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
+ ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
+ if (ret) {
+ pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
+
+ down_write(&policy->rwsem);
+ cpumask_set_cpu(old_cpu, policy->cpus);
+ up_write(&policy->rwsem);
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
- data = per_cpu(cpufreq_cpu_data, cpu);
+ ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
+ "cpufreq");
- if (!data) {
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- cpufreq_debug_enable_ratelimit();
- unlock_policy_rwsem_write(cpu);
return -EINVAL;
}
- per_cpu(cpufreq_cpu_data, cpu) = NULL;
+ return cpu_dev->id;
+}
-#ifdef CONFIG_SMP
- /* if this isn't the CPU which is the parent of the kobj, we
- * only need to unlink, put and exit
- */
- if (unlikely(cpu != data->cpu)) {
- dprintk("removing link\n");
- cpumask_clear_cpu(cpu, data->cpus);
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- kobj = &sys_dev->kobj;
- cpufreq_cpu_put(data);
- cpufreq_debug_enable_ratelimit();
- unlock_policy_rwsem_write(cpu);
- sysfs_remove_link(kobj, "cpufreq");
- return 0;
- }
-#endif
+static int __cpufreq_remove_dev_prepare(struct device *dev,
+ struct subsys_interface *sif)
+{
+ unsigned int cpu = dev->id, cpus;
+ int new_cpu, ret;
+ unsigned long flags;
+ struct cpufreq_policy *policy;
-#ifdef CONFIG_SMP
+ pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
-#ifdef CONFIG_HOTPLUG_CPU
- strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
- CPUFREQ_NAME_LEN);
-#endif
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
- /* if we have other CPUs still registered, we need to unlink them,
- * or else wait_for_completion below will lock up. Clean the
- * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
- * the sysfs links afterwards.
- */
- if (unlikely(cpumask_weight(data->cpus) > 1)) {
- for_each_cpu(j, data->cpus) {
- if (j == cpu)
- continue;
- per_cpu(cpufreq_cpu_data, j) = NULL;
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+
+ /* Save the policy somewhere when doing a light-weight tear-down */
+ if (cpufreq_suspended)
+ per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
+
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ if (!policy) {
+ pr_debug("%s: No cpu_data found\n", __func__);
+ return -EINVAL;
+ }
+
+ if (has_target()) {
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ if (ret) {
+ pr_err("%s: Failed to stop governor\n", __func__);
+ return ret;
}
}
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ if (!cpufreq_driver->setpolicy)
+ strncpy(per_cpu(cpufreq_cpu_governor, cpu),
+ policy->governor->name, CPUFREQ_NAME_LEN);
- if (unlikely(cpumask_weight(data->cpus) > 1)) {
- for_each_cpu(j, data->cpus) {
- if (j == cpu)
- continue;
- dprintk("removing link for cpu %u\n", j);
-#ifdef CONFIG_HOTPLUG_CPU
- strncpy(per_cpu(cpufreq_cpu_governor, j),
- data->governor->name, CPUFREQ_NAME_LEN);
-#endif
- cpu_sys_dev = get_cpu_sysdev(j);
- kobj = &cpu_sys_dev->kobj;
- unlock_policy_rwsem_write(cpu);
- sysfs_remove_link(kobj, "cpufreq");
- lock_policy_rwsem_write(cpu);
- cpufreq_cpu_put(data);
+ down_read(&policy->rwsem);
+ cpus = cpumask_weight(policy->cpus);
+ up_read(&policy->rwsem);
+
+ if (cpu != policy->cpu) {
+ sysfs_remove_link(&dev->kobj, "cpufreq");
+ } else if (cpus > 1) {
+ new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
+ if (new_cpu >= 0) {
+ update_policy_cpu(policy, new_cpu);
+
+ if (!cpufreq_suspended)
+ pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
+ __func__, new_cpu, cpu);
}
+ } else if (cpufreq_driver->stop_cpu && cpufreq_driver->setpolicy) {
+ cpufreq_driver->stop_cpu(policy);
}
-#else
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-#endif
- if (cpufreq_driver->target)
- __cpufreq_governor(data, CPUFREQ_GOV_STOP);
+ return 0;
+}
- kobj = &data->kobj;
- cmp = &data->kobj_unregister;
- unlock_policy_rwsem_write(cpu);
- kobject_put(kobj);
+static int __cpufreq_remove_dev_finish(struct device *dev,
+ struct subsys_interface *sif)
+{
+ unsigned int cpu = dev->id, cpus;
+ int ret;
+ unsigned long flags;
+ struct cpufreq_policy *policy;
- /* we need to make sure that the underlying kobj is actually
- * not referenced anymore by anybody before we proceed with
- * unloading.
- */
- dprintk("waiting for dropping of refcount\n");
- wait_for_completion(cmp);
- dprintk("wait complete\n");
+ read_lock_irqsave(&cpufreq_driver_lock, flags);
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
- lock_policy_rwsem_write(cpu);
- if (cpufreq_driver->exit)
- cpufreq_driver->exit(data);
- unlock_policy_rwsem_write(cpu);
+ if (!policy) {
+ pr_debug("%s: No cpu_data found\n", __func__);
+ return -EINVAL;
+ }
- free_cpumask_var(data->related_cpus);
- free_cpumask_var(data->cpus);
- kfree(data);
- per_cpu(cpufreq_cpu_data, cpu) = NULL;
+ down_write(&policy->rwsem);
+ cpus = cpumask_weight(policy->cpus);
+
+ if (cpus > 1)
+ cpumask_clear_cpu(cpu, policy->cpus);
+ up_write(&policy->rwsem);
+
+ /* If cpu is last user of policy, free policy */
+ if (cpus == 1) {
+ if (has_target()) {
+ ret = __cpufreq_governor(policy,
+ CPUFREQ_GOV_POLICY_EXIT);
+ if (ret) {
+ pr_err("%s: Failed to exit governor\n",
+ __func__);
+ return ret;
+ }
+ }
+
+ if (!cpufreq_suspended)
+ cpufreq_policy_put_kobj(policy);
+
+ /*
+ * Perform the ->exit() even during light-weight tear-down,
+ * since this is a core component, and is essential for the
+ * subsequent light-weight ->init() to succeed.
+ */
+ if (cpufreq_driver->exit)
+ cpufreq_driver->exit(policy);
+
+ /* Remove policy from list of active policies */
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ list_del(&policy->policy_list);
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ if (!cpufreq_suspended)
+ cpufreq_policy_free(policy);
+ } else if (has_target()) {
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+ if (!ret)
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+
+ if (ret) {
+ pr_err("%s: Failed to start governor\n", __func__);
+ return ret;
+ }
+ }
- cpufreq_debug_enable_ratelimit();
+ per_cpu(cpufreq_cpu_data, cpu) = NULL;
return 0;
}
-
-static int cpufreq_remove_dev(struct sys_device *sys_dev)
+/**
+ * cpufreq_remove_dev - remove a CPU device
+ *
+ * Removes the cpufreq interface for a CPU device.
+ */
+static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
- unsigned int cpu = sys_dev->id;
- int retval;
+ unsigned int cpu = dev->id;
+ int ret;
if (cpu_is_offline(cpu))
return 0;
- if (unlikely(lock_policy_rwsem_write(cpu)))
- BUG();
+ ret = __cpufreq_remove_dev_prepare(dev, sif);
- retval = __cpufreq_remove_dev(sys_dev);
- return retval;
-}
+ if (!ret)
+ ret = __cpufreq_remove_dev_finish(dev, sif);
+ return ret;
+}
static void handle_update(struct work_struct *work)
{
struct cpufreq_policy *policy =
container_of(work, struct cpufreq_policy, update);
unsigned int cpu = policy->cpu;
- dprintk("handle_update for cpu %u called\n", cpu);
+ pr_debug("handle_update for cpu %u called\n", cpu);
cpufreq_update_policy(cpu);
}
/**
- * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
+ * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
+ * in deep trouble.
* @cpu: cpu number
* @old_freq: CPU frequency the kernel thinks the CPU runs at
* @new_freq: CPU frequency the CPU actually runs at
@@ -1253,18 +1507,23 @@ static void handle_update(struct work_struct *work)
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
unsigned int new_freq)
{
+ struct cpufreq_policy *policy;
struct cpufreq_freqs freqs;
+ unsigned long flags;
- dprintk("Warning: CPU frequency out of sync: cpufreq and timing "
- "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
+ pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
+ old_freq, new_freq);
- freqs.cpu = cpu;
freqs.old = old_freq;
freqs.new = new_freq;
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-}
+ read_lock_irqsave(&cpufreq_driver_lock, flags);
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
+}
/**
* cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
@@ -1275,9 +1534,13 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
*/
unsigned int cpufreq_quick_get(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy;
unsigned int ret_freq = 0;
+ if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
+ return cpufreq_driver->get(cpu);
+
+ policy = cpufreq_cpu_get(cpu);
if (policy) {
ret_freq = policy->cur;
cpufreq_cpu_put(policy);
@@ -1287,6 +1550,25 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
}
EXPORT_SYMBOL(cpufreq_quick_get);
+/**
+ * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
+ * @cpu: CPU number
+ *
+ * Just return the max possible frequency for a given CPU.
+ */
+unsigned int cpufreq_quick_get_max(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ unsigned int ret_freq = 0;
+
+ if (policy) {
+ ret_freq = policy->max;
+ cpufreq_cpu_put(policy);
+ }
+
+ return ret_freq;
+}
+EXPORT_SYMBOL(cpufreq_quick_get_max);
static unsigned int __cpufreq_get(unsigned int cpu)
{
@@ -1319,126 +1601,140 @@ static unsigned int __cpufreq_get(unsigned int cpu)
*/
unsigned int cpufreq_get(unsigned int cpu)
{
- unsigned int ret_freq = 0;
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ unsigned int ret_freq = 0;
- if (!policy)
- goto out;
-
- if (unlikely(lock_policy_rwsem_read(cpu)))
- goto out_policy;
-
- ret_freq = __cpufreq_get(cpu);
+ if (policy) {
+ down_read(&policy->rwsem);
+ ret_freq = __cpufreq_get(cpu);
+ up_read(&policy->rwsem);
- unlock_policy_rwsem_read(cpu);
+ cpufreq_cpu_put(policy);
+ }
-out_policy:
- cpufreq_cpu_put(policy);
-out:
return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
+static struct subsys_interface cpufreq_interface = {
+ .name = "cpufreq",
+ .subsys = &cpu_subsys,
+ .add_dev = cpufreq_add_dev,
+ .remove_dev = cpufreq_remove_dev,
+};
-/**
- * cpufreq_suspend - let the low level driver prepare for suspend
+/*
+ * In case the platform wants some specific frequency to be configured
+ * during suspend.
*/
-
-static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
+int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
- int ret = 0;
+ int ret;
- int cpu = sysdev->id;
- struct cpufreq_policy *cpu_policy;
+ if (!policy->suspend_freq) {
+ pr_err("%s: suspend_freq can't be zero\n", __func__);
+ return -EINVAL;
+ }
- dprintk("suspending cpu %u\n", cpu);
+ pr_debug("%s: Setting suspend-freq: %u\n", __func__,
+ policy->suspend_freq);
- if (!cpu_online(cpu))
- return 0;
+ ret = __cpufreq_driver_target(policy, policy->suspend_freq,
+ CPUFREQ_RELATION_H);
+ if (ret)
+ pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
+ __func__, policy->suspend_freq, ret);
- /* we may be lax here as interrupts are off. Nonetheless
- * we need to grab the correct cpu policy, as to check
- * whether we really run on this CPU.
- */
+ return ret;
+}
+EXPORT_SYMBOL(cpufreq_generic_suspend);
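
The helper above is intended to be wired directly into a platform driver's ->suspend callback once the driver has chosen a suspend frequency. Below is a hedged sketch of that wiring, not part of this patch: the foo_* names, the table contents and the 800 MHz suspend frequency are made up, and it assumes the generic table helpers (cpufreq_generic_init(), cpufreq_generic_frequency_verify(), cpufreq_generic_attr) are available at this point in the series.

/* Hypothetical driver glue for cpufreq_generic_suspend(); foo_* is illustrative. */
static struct cpufreq_frequency_table foo_freq_table[] = {
        { .frequency = 400000 },                /* kHz */
        { .frequency = 800000 },
        { .frequency = 1200000 },
        { .frequency = CPUFREQ_TABLE_END },
};

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
        int ret;

        /* Generic helper: validates/installs the table and sets the latency */
        ret = cpufreq_generic_init(policy, foo_freq_table, 300 * 1000);
        if (ret)
                return ret;

        /* Frequency (kHz) the core programs via cpufreq_generic_suspend() */
        policy->suspend_freq = 800000;
        return 0;
}

static struct cpufreq_driver foo_cpufreq_driver = {
        .name           = "foo-cpufreq",
        .verify         = cpufreq_generic_frequency_verify,
        .target_index   = foo_target_index,     /* assumed to exist */
        .init           = foo_cpufreq_init,
        .suspend        = cpufreq_generic_suspend,
        .attr           = cpufreq_generic_attr,
};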
- cpu_policy = cpufreq_cpu_get(cpu);
- if (!cpu_policy)
- return -EINVAL;
+/**
+ * cpufreq_suspend() - Suspend CPUFreq governors
+ *
+ * Called during system-wide suspend/hibernate cycles to suspend governors,
+ * as some platforms can't change frequency after this point in the suspend
+ * cycle, because some of the devices (e.g. i2c, regulators) they use for
+ * changing frequency are suspended soon after this point.
+ */
+void cpufreq_suspend(void)
+{
+ struct cpufreq_policy *policy;
- /* only handle each CPU group once */
- if (unlikely(cpu_policy->cpu != cpu))
- goto out;
+ if (!cpufreq_driver)
+ return;
- if (cpufreq_driver->suspend) {
- ret = cpufreq_driver->suspend(cpu_policy, pmsg);
- if (ret)
- printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
- "step on CPU %u\n", cpu_policy->cpu);
+ if (!has_target())
+ return;
+
+ pr_debug("%s: Suspending Governors\n", __func__);
+
+ list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+ if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
+ pr_err("%s: Failed to stop governor for policy: %p\n",
+ __func__, policy);
+ else if (cpufreq_driver->suspend
+ && cpufreq_driver->suspend(policy))
+ pr_err("%s: Failed to suspend driver: %p\n", __func__,
+ policy);
}
-out:
- cpufreq_cpu_put(cpu_policy);
- return ret;
+ cpufreq_suspended = true;
}
/**
- * cpufreq_resume - restore proper CPU frequency handling after resume
+ * cpufreq_resume() - Resume CPUFreq governors
*
- * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
- * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
- * restored. It will verify that the current freq is in sync with
- * what we believe it to be. This is a bit later than when it
- * should be, but nonethteless it's better than calling
- * cpufreq_driver->get() here which might re-enable interrupts...
+ * Called during system-wide suspend/hibernate cycles to resume the governors
+ * that were suspended by cpufreq_suspend().
*/
-static int cpufreq_resume(struct sys_device *sysdev)
+void cpufreq_resume(void)
{
- int ret = 0;
-
- int cpu = sysdev->id;
- struct cpufreq_policy *cpu_policy;
+ struct cpufreq_policy *policy;
- dprintk("resuming cpu %u\n", cpu);
+ if (!cpufreq_driver)
+ return;
- if (!cpu_online(cpu))
- return 0;
+ if (!has_target())
+ return;
- /* we may be lax here as interrupts are off. Nonetheless
- * we need to grab the correct cpu policy, as to check
- * whether we really run on this CPU.
- */
+ pr_debug("%s: Resuming Governors\n", __func__);
- cpu_policy = cpufreq_cpu_get(cpu);
- if (!cpu_policy)
- return -EINVAL;
+ cpufreq_suspended = false;
- /* only handle each CPU group once */
- if (unlikely(cpu_policy->cpu != cpu))
- goto fail;
+ list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+ if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
+ pr_err("%s: Failed to resume driver: %p\n", __func__,
+ policy);
+ else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
+ || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
+ pr_err("%s: Failed to start governor for policy: %p\n",
+ __func__, policy);
- if (cpufreq_driver->resume) {
- ret = cpufreq_driver->resume(cpu_policy);
- if (ret) {
- printk(KERN_ERR "cpufreq: resume failed in ->resume "
- "step on CPU %u\n", cpu_policy->cpu);
- goto fail;
- }
+ /*
+ * Schedule a call to cpufreq_update_policy() for the boot CPU, i.e.
+ * the last policy in the list. It will verify that the current freq
+ * is in sync with what we believe it to be.
+ */
+ if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
+ schedule_work(&policy->update);
}
-
- schedule_work(&cpu_policy->update);
-
-fail:
- cpufreq_cpu_put(cpu_policy);
- return ret;
}
-static struct sysdev_driver cpufreq_sysdev_driver = {
- .add = cpufreq_add_dev,
- .remove = cpufreq_remove_dev,
- .suspend = cpufreq_suspend,
- .resume = cpufreq_resume,
-};
+/**
+ * cpufreq_get_current_driver - return current driver's name
+ *
+ * Return the name string of the currently loaded cpufreq driver
+ * or NULL, if none.
+ */
+const char *cpufreq_get_current_driver(void)
+{
+ if (cpufreq_driver)
+ return cpufreq_driver->name;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
/*********************************************************************
* NOTIFIER LISTS INTERFACE *
@@ -1461,6 +1757,9 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
int ret;
+ if (cpufreq_disabled())
+ return -EINVAL;
+
WARN_ON(!init_cpufreq_transition_notifier_list_called);
switch (list) {
@@ -1480,11 +1779,10 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
}
EXPORT_SYMBOL(cpufreq_register_notifier);
-
/**
* cpufreq_unregister_notifier - unregister a driver with cpufreq
* @nb: notifier block to be unregistered
- * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
+ * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
*
* Remove a driver from the CPU frequency notifier list.
*
@@ -1495,6 +1793,9 @@ int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
int ret;
+ if (cpufreq_disabled())
+ return -EINVAL;
+
switch (list) {
case CPUFREQ_TRANSITION_NOTIFIER:
ret = srcu_notifier_chain_unregister(
@@ -1517,18 +1818,145 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
* GOVERNORS *
*********************************************************************/
+/* Must set freqs->new to intermediate frequency */
+static int __target_intermediate(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs, int index)
+{
+ int ret;
+
+ freqs->new = cpufreq_driver->get_intermediate(policy, index);
+
+ /* We don't need to switch to intermediate freq */
+ if (!freqs->new)
+ return 0;
+
+ pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
+ __func__, policy->cpu, freqs->old, freqs->new);
+
+ cpufreq_freq_transition_begin(policy, freqs);
+ ret = cpufreq_driver->target_intermediate(policy, index);
+ cpufreq_freq_transition_end(policy, freqs, ret);
+
+ if (ret)
+ pr_err("%s: Failed to change to intermediate frequency: %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static int __target_index(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *freq_table, int index)
+{
+ struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
+ unsigned int intermediate_freq = 0;
+ int retval = -EINVAL;
+ bool notify;
+
+ notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
+ if (notify) {
+ /* Handle switching to intermediate frequency */
+ if (cpufreq_driver->get_intermediate) {
+ retval = __target_intermediate(policy, &freqs, index);
+ if (retval)
+ return retval;
+
+ intermediate_freq = freqs.new;
+ /* Set old freq to intermediate */
+ if (intermediate_freq)
+ freqs.old = freqs.new;
+ }
+
+ freqs.new = freq_table[index].frequency;
+ pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
+ __func__, policy->cpu, freqs.old, freqs.new);
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+ }
+
+ retval = cpufreq_driver->target_index(policy, index);
+ if (retval)
+ pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
+ retval);
+
+ if (notify) {
+ cpufreq_freq_transition_end(policy, &freqs, retval);
+
+ /*
+ * Failed after setting to intermediate freq? The driver should have
+ * reverted to the initial frequency and so should we. Check here for
+ * intermediate_freq instead of get_intermediate, in case we haven't
+ * switched to the intermediate freq at all.
+ */
+ if (unlikely(retval && intermediate_freq)) {
+ freqs.old = intermediate_freq;
+ freqs.new = policy->restore_freq;
+ cpufreq_freq_transition_begin(policy, &freqs);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
+ }
+ }
+
+ return retval;
+}
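
For drivers that cannot jump directly between two PLL rates, __target_index() above brackets the real switch with the optional get_intermediate()/target_intermediate() callbacks. A hedged sketch of a driver using them follows; the bar_* identifiers, the fixed step clock, BAR_STEP_FREQ_KHZ and the clk handles are assumptions made for illustration, not code from this patch.

/* Hypothetical intermediate-frequency hooks; bar_* names are made up. */
static unsigned int bar_get_intermediate(struct cpufreq_policy *policy,
                                         unsigned int index)
{
        /* Returning 0 tells the core that no intermediate switch is needed */
        if (bar_freq_table[index].frequency == BAR_STEP_FREQ_KHZ)
                return 0;

        return BAR_STEP_FREQ_KHZ;
}

static int bar_target_intermediate(struct cpufreq_policy *policy,
                                   unsigned int index)
{
        /* Park the CPU on a fixed step clock so the PLL can be reprogrammed;
         * the core has already issued the PRECHANGE notification for this. */
        return clk_set_parent(bar_cpu_mux_clk, bar_step_clk);
}

static int bar_target_index(struct cpufreq_policy *policy, unsigned int index)
{
        int ret;

        /* Program the PLL for the final frequency (table is in kHz) */
        ret = clk_set_rate(bar_pll_clk,
                           bar_freq_table[index].frequency * 1000);
        if (ret)
                /* On failure the driver is expected to restore the original
                 * frequency itself; the core only emits the revert
                 * notification using policy->restore_freq (see above). */
                return ret;

        /* ...and switch the CPU mux back onto the PLL */
        return clk_set_parent(bar_cpu_mux_clk, bar_pll_clk);
}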
int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
+ unsigned int old_target_freq = target_freq;
int retval = -EINVAL;
- dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
- target_freq, relation);
- if (cpu_online(policy->cpu) && cpufreq_driver->target)
+ if (cpufreq_disabled())
+ return -ENODEV;
+
+ /* Make sure that target_freq is within supported range */
+ if (target_freq > policy->max)
+ target_freq = policy->max;
+ if (target_freq < policy->min)
+ target_freq = policy->min;
+
+ pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
+ policy->cpu, target_freq, relation, old_target_freq);
+
+ /*
+ * This might look like a redundant call as we are checking it again
+ * after finding the index. But it is left intentionally for the case
+ * where exactly the same frequency is requested again, so that we can
+ * save a few function calls.
+ */
+ if (target_freq == policy->cur)
+ return 0;
+
+ /* Save last value to restore later on errors */
+ policy->restore_freq = policy->cur;
+
+ if (cpufreq_driver->target)
retval = cpufreq_driver->target(policy, target_freq, relation);
+ else if (cpufreq_driver->target_index) {
+ struct cpufreq_frequency_table *freq_table;
+ int index;
+
+ freq_table = cpufreq_frequency_get_table(policy->cpu);
+ if (unlikely(!freq_table)) {
+ pr_err("%s: Unable to find freq_table\n", __func__);
+ goto out;
+ }
+
+ retval = cpufreq_frequency_table_target(policy, freq_table,
+ target_freq, relation, &index);
+ if (unlikely(retval)) {
+ pr_err("%s: Unable to find matching freq\n", __func__);
+ goto out;
+ }
+ if (freq_table[index].frequency == policy->cur) {
+ retval = 0;
+ goto out;
+ }
+
+ retval = __target_index(policy, freq_table, index);
+ }
+
+out:
return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
@@ -1539,40 +1967,16 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
{
int ret = -EINVAL;
- policy = cpufreq_cpu_get(policy->cpu);
- if (!policy)
- goto no_policy;
-
- if (unlikely(lock_policy_rwsem_write(policy->cpu)))
- goto fail;
+ down_write(&policy->rwsem);
ret = __cpufreq_driver_target(policy, target_freq, relation);
- unlock_policy_rwsem_write(policy->cpu);
+ up_write(&policy->rwsem);
-fail:
- cpufreq_cpu_put(policy);
-no_policy:
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
-int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
-{
- int ret = 0;
-
- policy = cpufreq_cpu_get(policy->cpu);
- if (!policy)
- return -EINVAL;
-
- if (cpu_online(cpu) && cpufreq_driver->getavg)
- ret = cpufreq_driver->getavg(policy, cpu);
-
- cpufreq_cpu_put(policy);
- return ret;
-}
-EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
-
/*
* when "event" is CPUFREQ_GOV_LIMITS
*/
@@ -1592,39 +1996,68 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
struct cpufreq_governor *gov = NULL;
#endif
+ /* Don't start any governor operations if we are entering suspend */
+ if (cpufreq_suspended)
+ return 0;
+
if (policy->governor->max_transition_latency &&
policy->cpuinfo.transition_latency >
policy->governor->max_transition_latency) {
if (!gov)
return -EINVAL;
else {
- printk(KERN_WARNING "%s governor failed, too long"
- " transition latency of HW, fallback"
- " to %s governor\n",
- policy->governor->name,
- gov->name);
+ pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
+ policy->governor->name, gov->name);
policy->governor = gov;
}
}
- if (!try_module_get(policy->governor->owner))
- return -EINVAL;
+ if (event == CPUFREQ_GOV_POLICY_INIT)
+ if (!try_module_get(policy->governor->owner))
+ return -EINVAL;
+
+ pr_debug("__cpufreq_governor for CPU %u, event %u\n",
+ policy->cpu, event);
+
+ mutex_lock(&cpufreq_governor_lock);
+ if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
+ || (!policy->governor_enabled
+ && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
+ mutex_unlock(&cpufreq_governor_lock);
+ return -EBUSY;
+ }
+
+ if (event == CPUFREQ_GOV_STOP)
+ policy->governor_enabled = false;
+ else if (event == CPUFREQ_GOV_START)
+ policy->governor_enabled = true;
+
+ mutex_unlock(&cpufreq_governor_lock);
- dprintk("__cpufreq_governor for CPU %u, event %u\n",
- policy->cpu, event);
ret = policy->governor->governor(policy, event);
- /* we keep one module reference alive for
- each CPU governed by this CPU */
- if ((event != CPUFREQ_GOV_START) || ret)
- module_put(policy->governor->owner);
- if ((event == CPUFREQ_GOV_STOP) && !ret)
+ if (!ret) {
+ if (event == CPUFREQ_GOV_POLICY_INIT)
+ policy->governor->initialized++;
+ else if (event == CPUFREQ_GOV_POLICY_EXIT)
+ policy->governor->initialized--;
+ } else {
+ /* Restore original values */
+ mutex_lock(&cpufreq_governor_lock);
+ if (event == CPUFREQ_GOV_STOP)
+ policy->governor_enabled = true;
+ else if (event == CPUFREQ_GOV_START)
+ policy->governor_enabled = false;
+ mutex_unlock(&cpufreq_governor_lock);
+ }
+
+ if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
+ ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
module_put(policy->governor->owner);
return ret;
}
-
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
int err;
@@ -1632,8 +2065,12 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
if (!governor)
return -EINVAL;
+ if (cpufreq_disabled())
+ return -ENODEV;
+
mutex_lock(&cpufreq_governor_mutex);
+ governor->initialized = 0;
err = -EBUSY;
if (__find_governor(governor->name) == NULL) {
err = 0;
@@ -1645,24 +2082,22 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
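
As a concrete illustration of the governor callback contract driven by __cpufreq_governor() above, here is a hedged sketch of a trivial governor that pins each policy at its maximum, modelled on the in-tree performance governor; the "ceiling" name and symbols are made up. It calls __cpufreq_driver_target() rather than cpufreq_driver_target() because policy->rwsem is already held by the caller.

/* Hypothetical governor; behaves like the performance governor. */
static int ceiling_governor(struct cpufreq_policy *policy, unsigned int event)
{
        switch (event) {
        case CPUFREQ_GOV_START:
        case CPUFREQ_GOV_LIMITS:
                pr_debug("setting CPU%u to %u kHz\n", policy->cpu, policy->max);
                /* rwsem is held by the caller, so use the __ variant */
                return __cpufreq_driver_target(policy, policy->max,
                                               CPUFREQ_RELATION_H);
        default:
                return 0;
        }
}

static struct cpufreq_governor cpufreq_gov_ceiling = {
        .name           = "ceiling",
        .governor       = ceiling_governor,
        .owner          = THIS_MODULE,
};

static int __init cpufreq_gov_ceiling_init(void)
{
        return cpufreq_register_governor(&cpufreq_gov_ceiling);
}
module_init(cpufreq_gov_ceiling_init);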
-
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
-#ifdef CONFIG_HOTPLUG_CPU
int cpu;
-#endif
if (!governor)
return;
-#ifdef CONFIG_HOTPLUG_CPU
+ if (cpufreq_disabled())
+ return;
+
for_each_present_cpu(cpu) {
if (cpu_online(cpu))
continue;
if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
}
-#endif
mutex_lock(&cpufreq_governor_mutex);
list_del(&governor->governor_list);
@@ -1672,7 +2107,6 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
-
/*********************************************************************
* POLICY INTERFACE *
*********************************************************************/
@@ -1694,182 +2128,188 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
if (!cpu_policy)
return -EINVAL;
- memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
+ memcpy(policy, cpu_policy, sizeof(*policy));
cpufreq_cpu_put(cpu_policy);
return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
-
/*
- * data : current policy.
- * policy : policy to be set.
+ * policy : current policy.
+ * new_policy: policy to be set.
*/
-static int __cpufreq_set_policy(struct cpufreq_policy *data,
- struct cpufreq_policy *policy)
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
+ struct cpufreq_policy *new_policy)
{
- int ret = 0;
+ struct cpufreq_governor *old_gov;
+ int ret;
- cpufreq_debug_disable_ratelimit();
- dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
- policy->min, policy->max);
+ pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
+ new_policy->cpu, new_policy->min, new_policy->max);
- memcpy(&policy->cpuinfo, &data->cpuinfo,
- sizeof(struct cpufreq_cpuinfo));
+ memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
- if (policy->min > data->max || policy->max < data->min) {
- ret = -EINVAL;
- goto error_out;
- }
+ if (new_policy->min > policy->max || new_policy->max < policy->min)
+ return -EINVAL;
/* verify the cpu speed can be set within this limit */
- ret = cpufreq_driver->verify(policy);
+ ret = cpufreq_driver->verify(new_policy);
if (ret)
- goto error_out;
+ return ret;
/* adjust if necessary - all reasons */
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
- CPUFREQ_ADJUST, policy);
+ CPUFREQ_ADJUST, new_policy);
/* adjust if necessary - hardware incompatibility*/
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
- CPUFREQ_INCOMPATIBLE, policy);
+ CPUFREQ_INCOMPATIBLE, new_policy);
- /* verify the cpu speed can be set within this limit,
- which might be different to the first one */
- ret = cpufreq_driver->verify(policy);
+ /*
+ * verify the cpu speed can be set within this limit, which might be
+ * different to the first one
+ */
+ ret = cpufreq_driver->verify(new_policy);
if (ret)
- goto error_out;
+ return ret;
/* notification of the new policy */
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
- CPUFREQ_NOTIFY, policy);
+ CPUFREQ_NOTIFY, new_policy);
- data->min = policy->min;
- data->max = policy->max;
+ policy->min = new_policy->min;
+ policy->max = new_policy->max;
- dprintk("new min and max freqs are %u - %u kHz\n",
- data->min, data->max);
+ pr_debug("new min and max freqs are %u - %u kHz\n",
+ policy->min, policy->max);
if (cpufreq_driver->setpolicy) {
- data->policy = policy->policy;
- dprintk("setting range\n");
- ret = cpufreq_driver->setpolicy(policy);
- } else {
- if (policy->governor != data->governor) {
- /* save old, working values */
- struct cpufreq_governor *old_gov = data->governor;
-
- dprintk("governor switch\n");
-
- /* end old governor */
- if (data->governor)
- __cpufreq_governor(data, CPUFREQ_GOV_STOP);
-
- /* start new governor */
- data->governor = policy->governor;
- if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
- /* new governor failed, so re-start old one */
- dprintk("starting governor %s failed\n",
- data->governor->name);
- if (old_gov) {
- data->governor = old_gov;
- __cpufreq_governor(data,
- CPUFREQ_GOV_START);
- }
- ret = -EINVAL;
- goto error_out;
- }
- /* might be a policy change, too, so fall through */
- }
- dprintk("governor: change or update limits\n");
- __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
+ policy->policy = new_policy->policy;
+ pr_debug("setting range\n");
+ return cpufreq_driver->setpolicy(new_policy);
}
-error_out:
- cpufreq_debug_enable_ratelimit();
- return ret;
+ if (new_policy->governor == policy->governor)
+ goto out;
+
+ pr_debug("governor switch\n");
+
+ /* save old, working values */
+ old_gov = policy->governor;
+ /* end old governor */
+ if (old_gov) {
+ __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ up_write(&policy->rwsem);
+ __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+ down_write(&policy->rwsem);
+ }
+
+ /* start new governor */
+ policy->governor = new_policy->governor;
+ if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
+ if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
+ goto out;
+
+ up_write(&policy->rwsem);
+ __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+ down_write(&policy->rwsem);
+ }
+
+ /* new governor failed, so re-start old one */
+ pr_debug("starting governor %s failed\n", policy->governor->name);
+ if (old_gov) {
+ policy->governor = old_gov;
+ __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
+ __cpufreq_governor(policy, CPUFREQ_GOV_START);
+ }
+
+ return -EINVAL;
+
+ out:
+ pr_debug("governor: change or update limits\n");
+ return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
}
/**
* cpufreq_update_policy - re-evaluate an existing cpufreq policy
* @cpu: CPU which shall be re-evaluated
*
- * Usefull for policy notifiers which have different necessities
+ * Useful for policy notifiers which have different necessities
* at different times.
*/
int cpufreq_update_policy(unsigned int cpu)
{
- struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
- struct cpufreq_policy policy;
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy new_policy;
int ret;
- if (!data) {
- ret = -ENODEV;
- goto no_policy;
- }
+ if (!policy)
+ return -ENODEV;
- if (unlikely(lock_policy_rwsem_write(cpu))) {
- ret = -EINVAL;
- goto fail;
- }
+ down_write(&policy->rwsem);
- dprintk("updating policy for CPU %u\n", cpu);
- memcpy(&policy, data, sizeof(struct cpufreq_policy));
- policy.min = data->user_policy.min;
- policy.max = data->user_policy.max;
- policy.policy = data->user_policy.policy;
- policy.governor = data->user_policy.governor;
+ pr_debug("updating policy for CPU %u\n", cpu);
+ memcpy(&new_policy, policy, sizeof(*policy));
+ new_policy.min = policy->user_policy.min;
+ new_policy.max = policy->user_policy.max;
+ new_policy.policy = policy->user_policy.policy;
+ new_policy.governor = policy->user_policy.governor;
- /* BIOS might change freq behind our back
- -> ask driver for current freq and notify governors about a change */
- if (cpufreq_driver->get) {
- policy.cur = cpufreq_driver->get(cpu);
- if (!data->cur) {
- dprintk("Driver did not initialize current freq");
- data->cur = policy.cur;
+ /*
+ * BIOS might change freq behind our back
+ * -> ask driver for current freq and notify governors about a change
+ */
+ if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
+ new_policy.cur = cpufreq_driver->get(cpu);
+ if (WARN_ON(!new_policy.cur)) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ if (!policy->cur) {
+ pr_debug("Driver did not initialize current freq\n");
+ policy->cur = new_policy.cur;
} else {
- if (data->cur != policy.cur)
- cpufreq_out_of_sync(cpu, data->cur,
- policy.cur);
+ if (policy->cur != new_policy.cur && has_target())
+ cpufreq_out_of_sync(cpu, policy->cur,
+ new_policy.cur);
}
}
- ret = __cpufreq_set_policy(data, &policy);
+ ret = cpufreq_set_policy(policy, &new_policy);
- unlock_policy_rwsem_write(cpu);
+unlock:
+ up_write(&policy->rwsem);
-fail:
- cpufreq_cpu_put(data);
-no_policy:
+ cpufreq_cpu_put(policy);
return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
-static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
+static int cpufreq_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
- struct sys_device *sys_dev;
+ struct device *dev;
- sys_dev = get_cpu_sysdev(cpu);
- if (sys_dev) {
- switch (action) {
+ dev = get_cpu_device(cpu);
+ if (dev) {
+ switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- cpufreq_add_dev(sys_dev);
+ __cpufreq_add_dev(dev, NULL);
break;
+
case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- if (unlikely(lock_policy_rwsem_write(cpu)))
- BUG();
+ __cpufreq_remove_dev_prepare(dev, NULL);
+ break;
- __cpufreq_remove_dev(sys_dev);
+ case CPU_POST_DEAD:
+ __cpufreq_remove_dev_finish(dev, NULL);
break;
+
case CPU_DOWN_FAILED:
- case CPU_DOWN_FAILED_FROZEN:
- cpufreq_add_dev(sys_dev);
+ __cpufreq_add_dev(dev, NULL);
break;
}
}
@@ -1877,10 +2317,77 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
}
static struct notifier_block __refdata cpufreq_cpu_notifier = {
- .notifier_call = cpufreq_cpu_callback,
+ .notifier_call = cpufreq_cpu_callback,
};
/*********************************************************************
+ * BOOST *
+ *********************************************************************/
+static int cpufreq_boost_set_sw(int state)
+{
+ struct cpufreq_frequency_table *freq_table;
+ struct cpufreq_policy *policy;
+ int ret = -EINVAL;
+
+ list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+ freq_table = cpufreq_frequency_get_table(policy->cpu);
+ if (freq_table) {
+ ret = cpufreq_frequency_table_cpuinfo(policy,
+ freq_table);
+ if (ret) {
+ pr_err("%s: Policy frequency update failed\n",
+ __func__);
+ break;
+ }
+ policy->user_policy.max = policy->max;
+ __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+ }
+ }
+
+ return ret;
+}
+
+int cpufreq_boost_trigger_state(int state)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ if (cpufreq_driver->boost_enabled == state)
+ return 0;
+
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ cpufreq_driver->boost_enabled = state;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ ret = cpufreq_driver->set_boost(state);
+ if (ret) {
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ cpufreq_driver->boost_enabled = !state;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ pr_err("%s: Cannot %s BOOST\n",
+ __func__, state ? "enable" : "disable");
+ }
+
+ return ret;
+}
+
+int cpufreq_boost_supported(void)
+{
+ if (likely(cpufreq_driver))
+ return cpufreq_driver->boost_supported;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
+
+int cpufreq_boost_enabled(void)
+{
+ return cpufreq_driver->boost_enabled;
+}
+EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
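
The software fallback above, cpufreq_boost_set_sw(), lets a table-based driver expose a boost frequency without any dedicated hardware toggle. A hedged sketch follows, assuming a frequency table whose top entry is marked CPUFREQ_BOOST_FREQ; the baz_* names and frequencies are illustrative only.

/* Hypothetical boost-capable table; entries marked CPUFREQ_BOOST_FREQ are
 * usable only while boost is enabled via the global sysfs "boost" file. */
static struct cpufreq_frequency_table baz_freq_table[] = {
        { .frequency = 1000000 },
        { .frequency = 1500000 },
        { .flags = CPUFREQ_BOOST_FREQ, .frequency = 1800000 },
        { .frequency = CPUFREQ_TABLE_END },
};

static struct cpufreq_driver baz_cpufreq_driver = {
        .name            = "baz-cpufreq",
        /* .init/.verify/.target_index/.get omitted for brevity */
        .boost_supported = true,
        /* No .set_boost: the core falls back to cpufreq_boost_set_sw(),
         * which re-evaluates every policy's limits from the table. */
};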
+
+/*********************************************************************
* REGISTER / UNREGISTER CPUFREQ DRIVER *
*********************************************************************/
@@ -1889,7 +2396,7 @@ static struct notifier_block __refdata cpufreq_cpu_notifier = {
* @driver_data: A struct cpufreq_driver containing the values#
* submitted by the CPU Frequency driver.
*
- * Registers a CPU Frequency driver to this core code. This code
+ * Registers a CPU Frequency driver to this core code. This code
* returns zero on success, -EBUSY when another driver got here first
* (and isn't unregistered in the meantime).
*
@@ -1899,27 +2406,51 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
unsigned long flags;
int ret;
+ if (cpufreq_disabled())
+ return -ENODEV;
+
if (!driver_data || !driver_data->verify || !driver_data->init ||
- ((!driver_data->setpolicy) && (!driver_data->target)))
+ !(driver_data->setpolicy || driver_data->target_index ||
+ driver_data->target) ||
+ (driver_data->setpolicy && (driver_data->target_index ||
+ driver_data->target)) ||
+ (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
return -EINVAL;
- dprintk("trying to register driver %s\n", driver_data->name);
+ pr_debug("trying to register driver %s\n", driver_data->name);
if (driver_data->setpolicy)
driver_data->flags |= CPUFREQ_CONST_LOOPS;
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
if (cpufreq_driver) {
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- return -EBUSY;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ return -EEXIST;
}
cpufreq_driver = driver_data;
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ if (cpufreq_boost_supported()) {
+ /*
+ * Check if the driver provides a function to enable boost;
+ * if not, use cpufreq_boost_set_sw as the default
+ */
+ if (!cpufreq_driver->set_boost)
+ cpufreq_driver->set_boost = cpufreq_boost_set_sw;
- ret = sysdev_driver_register(&cpu_sysdev_class,
- &cpufreq_sysdev_driver);
+ ret = cpufreq_sysfs_create_file(&boost.attr);
+ if (ret) {
+ pr_err("%s: cannot register global BOOST sysfs file\n",
+ __func__);
+ goto err_null_driver;
+ }
+ }
- if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
+ ret = subsys_interface_register(&cpufreq_interface);
+ if (ret)
+ goto err_boost_unreg;
+
+ if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
int i;
ret = -ENODEV;
@@ -1932,32 +2463,33 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
/* if all ->init() calls failed, unregister */
if (ret) {
- dprintk("no CPU initialized for driver %s\n",
- driver_data->name);
- sysdev_driver_unregister(&cpu_sysdev_class,
- &cpufreq_sysdev_driver);
-
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
- cpufreq_driver = NULL;
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ pr_debug("no CPU initialized for driver %s\n",
+ driver_data->name);
+ goto err_if_unreg;
}
}
- if (!ret) {
- register_hotcpu_notifier(&cpufreq_cpu_notifier);
- dprintk("driver %s up and running\n", driver_data->name);
- cpufreq_debug_enable_ratelimit();
- }
+ register_hotcpu_notifier(&cpufreq_cpu_notifier);
+ pr_debug("driver %s up and running\n", driver_data->name);
+ return 0;
+err_if_unreg:
+ subsys_interface_unregister(&cpufreq_interface);
+err_boost_unreg:
+ if (cpufreq_boost_supported())
+ cpufreq_sysfs_remove_file(&boost.attr);
+err_null_driver:
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ cpufreq_driver = NULL;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
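
The validity check at the top of cpufreq_register_driver() requires a driver to pick exactly one of ->setpolicy, ->target or ->target_index, and to provide ->get_intermediate and ->target_intermediate as a pair. A hedged sketch of the usual module glue around the register/unregister pair, reusing the hypothetical foo_cpufreq_driver from the earlier suspend example:

static int __init foo_cpufreq_module_init(void)
{
        /* Returns -EEXIST if another cpufreq driver is already registered */
        return cpufreq_register_driver(&foo_cpufreq_driver);
}
module_init(foo_cpufreq_module_init);

static void __exit foo_cpufreq_module_exit(void)
{
        cpufreq_unregister_driver(&foo_cpufreq_driver);
}
module_exit(foo_cpufreq_module_exit);

MODULE_DESCRIPTION("Illustrative cpufreq driver glue (not part of this patch)");
MODULE_LICENSE("GPL");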
-
/**
* cpufreq_unregister_driver - unregister the current CPUFreq driver
*
- * Unregister the current CPUFreq driver. Only call this if you have
+ * Unregister the current CPUFreq driver. Only call this if you have
* the right to do so, i.e. if you have succeeded in initialising before!
* Returns zero if successful, and -EINVAL if the cpufreq_driver is
* currently not initialised.
@@ -1966,21 +2498,24 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
unsigned long flags;
- cpufreq_debug_disable_ratelimit();
-
- if (!cpufreq_driver || (driver != cpufreq_driver)) {
- cpufreq_debug_enable_ratelimit();
+ if (!cpufreq_driver || (driver != cpufreq_driver))
return -EINVAL;
- }
- dprintk("unregistering driver %s\n", driver->name);
+ pr_debug("unregistering driver %s\n", driver->name);
+
+ subsys_interface_unregister(&cpufreq_interface);
+ if (cpufreq_boost_supported())
+ cpufreq_sysfs_remove_file(&boost.attr);
- sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ down_write(&cpufreq_rwsem);
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+
cpufreq_driver = NULL;
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ up_write(&cpufreq_rwsem);
return 0;
}
@@ -1988,15 +2523,10 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
static int __init cpufreq_core_init(void)
{
- int cpu;
-
- for_each_possible_cpu(cpu) {
- per_cpu(cpufreq_policy_cpu, cpu) = -1;
- init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
- }
+ if (cpufreq_disabled())
+ return -ENODEV;
- cpufreq_global_kobject = kobject_create_and_add("cpufreq",
- &cpu_sysdev_class.kset.kobj);
+ cpufreq_global_kobject = kobject_create();
BUG_ON(!cpufreq_global_kobject);
return 0;