Diffstat (limited to 'drivers/cpufreq/cpufreq_stats.c')
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c | 157
1 file changed, 62 insertions(+), 95 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 4cf0d2805cb..0cd9b4dcef9 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -13,7 +13,7 @@
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <asm/cputime.h>
+#include <linux/cputime.h>
static spinlock_t cpufreq_stats_lock;
@@ -151,76 +151,62 @@ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
return -1;
}
-/* should be called late in the CPU removal sequence so that the stats
- * memory is still available in case someone tries to use it.
- */
-static void cpufreq_stats_free_table(unsigned int cpu)
+static void __cpufreq_stats_free_table(struct cpufreq_policy *policy)
{
- struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
+ struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
- if (stat) {
- pr_debug("%s: Free stat table\n", __func__);
- kfree(stat->time_in_state);
- kfree(stat);
- per_cpu(cpufreq_stats_table, cpu) = NULL;
- }
+ if (!stat)
+ return;
+
+ pr_debug("%s: Free stat table\n", __func__);
+
+ sysfs_remove_group(&policy->kobj, &stats_attr_group);
+ kfree(stat->time_in_state);
+ kfree(stat);
+ per_cpu(cpufreq_stats_table, policy->cpu) = NULL;
}
-/* must be called early in the CPU removal sequence (before
- * cpufreq_remove_dev) so that policy is still valid.
- */
-static void cpufreq_stats_free_sysfs(unsigned int cpu)
+static void cpufreq_stats_free_table(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy;
+ policy = cpufreq_cpu_get(cpu);
if (!policy)
return;
- if (!cpufreq_frequency_get_table(cpu))
- goto put_ref;
-
- if (!policy_is_shared(policy)) {
- pr_debug("%s: Free sysfs stat\n", __func__);
- sysfs_remove_group(&policy->kobj, &stats_attr_group);
- }
+ if (cpufreq_frequency_get_table(policy->cpu))
+ __cpufreq_stats_free_table(policy);
-put_ref:
cpufreq_cpu_put(policy);
}
-static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
- struct cpufreq_frequency_table *table)
+static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
{
- unsigned int i, j, count = 0, ret = 0;
+ unsigned int i, count = 0, ret = 0;
struct cpufreq_stats *stat;
- struct cpufreq_policy *current_policy;
unsigned int alloc_size;
unsigned int cpu = policy->cpu;
+ struct cpufreq_frequency_table *pos, *table;
+
+ table = cpufreq_frequency_get_table(cpu);
+ if (unlikely(!table))
+ return 0;
+
if (per_cpu(cpufreq_stats_table, cpu))
return -EBUSY;
stat = kzalloc(sizeof(*stat), GFP_KERNEL);
if ((stat) == NULL)
return -ENOMEM;
- current_policy = cpufreq_cpu_get(cpu);
- if (current_policy == NULL) {
- ret = -EINVAL;
- goto error_get_fail;
- }
-
- ret = sysfs_create_group(&current_policy->kobj, &stats_attr_group);
+ ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
if (ret)
goto error_out;
stat->cpu = cpu;
per_cpu(cpufreq_stats_table, cpu) = stat;
- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
- unsigned int freq = table[i].frequency;
- if (freq == CPUFREQ_ENTRY_INVALID)
- continue;
+ cpufreq_for_each_valid_entry(pos, table)
count++;
- }
alloc_size = count * sizeof(int) + count * sizeof(u64);
@@ -231,36 +217,48 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
if (!stat->time_in_state) {
ret = -ENOMEM;
- goto error_out;
+ goto error_alloc;
}
stat->freq_table = (unsigned int *)(stat->time_in_state + count);
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
stat->trans_table = stat->freq_table + count;
#endif
- j = 0;
- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
- unsigned int freq = table[i].frequency;
- if (freq == CPUFREQ_ENTRY_INVALID)
- continue;
- if (freq_table_get_index(stat, freq) == -1)
- stat->freq_table[j++] = freq;
- }
- stat->state_num = j;
+ i = 0;
+ cpufreq_for_each_valid_entry(pos, table)
+ if (freq_table_get_index(stat, pos->frequency) == -1)
+ stat->freq_table[i++] = pos->frequency;
+ stat->state_num = i;
spin_lock(&cpufreq_stats_lock);
stat->last_time = get_jiffies_64();
stat->last_index = freq_table_get_index(stat, policy->cur);
spin_unlock(&cpufreq_stats_lock);
- cpufreq_cpu_put(current_policy);
return 0;
+error_alloc:
+ sysfs_remove_group(&policy->kobj, &stats_attr_group);
error_out:
- cpufreq_cpu_put(current_policy);
-error_get_fail:
kfree(stat);
per_cpu(cpufreq_stats_table, cpu) = NULL;
return ret;
}
+static void cpufreq_stats_create_table(unsigned int cpu)
+{
+ struct cpufreq_policy *policy;
+
+ /*
+ * "likely(!policy)" because normally cpufreq_stats will be registered
+ * before cpufreq driver
+ */
+ policy = cpufreq_cpu_get(cpu);
+ if (likely(!policy))
+ return;
+
+ __cpufreq_stats_create_table(policy);
+
+ cpufreq_cpu_put(policy);
+}
+
static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
{
struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
@@ -277,25 +275,20 @@ static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
unsigned long val, void *data)
{
- int ret;
+ int ret = 0;
struct cpufreq_policy *policy = data;
- struct cpufreq_frequency_table *table;
- unsigned int cpu = policy->cpu;
if (val == CPUFREQ_UPDATE_POLICY_CPU) {
cpufreq_stats_update_policy_cpu(policy);
return 0;
}
- if (val != CPUFREQ_NOTIFY)
- return 0;
- table = cpufreq_frequency_get_table(cpu);
- if (!table)
- return 0;
- ret = cpufreq_stats_create_table(policy, table);
- if (ret)
- return ret;
- return 0;
+ if (val == CPUFREQ_CREATE_POLICY)
+ ret = __cpufreq_stats_create_table(policy);
+ else if (val == CPUFREQ_REMOVE_POLICY)
+ __cpufreq_stats_free_table(policy);
+
+ return ret;
}
static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
@@ -334,29 +327,6 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
return 0;
}
-static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
-
- switch (action) {
- case CPU_DOWN_PREPARE:
- cpufreq_stats_free_sysfs(cpu);
- break;
- case CPU_DEAD:
- cpufreq_stats_free_table(cpu);
- break;
- }
- return NOTIFY_OK;
-}
-
-/* priority=1 so this will get called before cpufreq_remove_dev */
-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
- .notifier_call = cpufreq_stat_cpu_callback,
- .priority = 1,
-};
-
static struct notifier_block notifier_policy_block = {
.notifier_call = cpufreq_stat_notifier_policy
};
@@ -376,14 +346,14 @@ static int __init cpufreq_stats_init(void)
if (ret)
return ret;
- register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
+ for_each_online_cpu(cpu)
+ cpufreq_stats_create_table(cpu);
ret = cpufreq_register_notifier(&notifier_trans_block,
CPUFREQ_TRANSITION_NOTIFIER);
if (ret) {
cpufreq_unregister_notifier(&notifier_policy_block,
CPUFREQ_POLICY_NOTIFIER);
- unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
for_each_online_cpu(cpu)
cpufreq_stats_free_table(cpu);
return ret;
@@ -399,11 +369,8 @@ static void __exit cpufreq_stats_exit(void)
CPUFREQ_POLICY_NOTIFIER);
cpufreq_unregister_notifier(&notifier_trans_block,
CPUFREQ_TRANSITION_NOTIFIER);
- unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
- for_each_online_cpu(cpu) {
+ for_each_online_cpu(cpu)
cpufreq_stats_free_table(cpu);
- cpufreq_stats_free_sysfs(cpu);
- }
}
MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");