Diffstat (limited to 'drivers/cpufreq/cpufreq_conservative.c')
-rw-r--r--	drivers/cpufreq/cpufreq_conservative.c	786
1 file changed, 221 insertions(+), 565 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 526bfbf6961..25a70d06c5b 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -11,221 +11,150 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/cpufreq.h>
-#include <linux/cpu.h>
-#include <linux/jiffies.h>
-#include <linux/kernel_stat.h>
-#include <linux/mutex.h>
-#include <linux/hrtimer.h>
-#include <linux/tick.h>
-#include <linux/ktime.h>
-#include <linux/sched.h>
-
-/*
- * dbs is used in this file as a shortform for demandbased switching
- * It helps to keep variable names smaller, simpler
- */
+#include <linux/slab.h>
+#include "cpufreq_governor.h"
 
+/* Conservative governor macros */
 #define DEF_FREQUENCY_UP_THRESHOLD		(80)
 #define DEF_FREQUENCY_DOWN_THRESHOLD		(20)
+#define DEF_FREQUENCY_STEP			(5)
+#define DEF_SAMPLING_DOWN_FACTOR		(1)
+#define MAX_SAMPLING_DOWN_FACTOR		(10)
+
+static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);
+
+static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
+					   struct cpufreq_policy *policy)
+{
+	unsigned int freq_target = (cs_tuners->freq_step * policy->max) / 100;
+
+	/* max freq cannot be less than 100. But who knows... */
+	if (unlikely(freq_target == 0))
+		freq_target = DEF_FREQUENCY_STEP;
+
+	return freq_target;
+}
 
 /*
- * The polling frequency of this governor depends on the capability of
- * the processor. Default polling frequency is 1000 times the transition
- * latency of the processor. The governor will work on any processor with
- * transition latency <= 10mS, using appropriate sampling
- * rate.
- * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
- * this governor will not work.
- * All times here are in uS.
+ * Every sampling_rate, we check, if current idle time is less than 20%
+ * (default), then we try to increase frequency. Every sampling_rate *
+ * sampling_down_factor, we check, if current idle time is more than 80%
+ * (default), then we try to decrease frequency
+ *
+ * Any frequency increase takes it to the maximum frequency. Frequency reduction
+ * happens at minimum steps of 5% (default) of maximum frequency
  */
-#define MIN_SAMPLING_RATE_RATIO			(2)
-
-static unsigned int min_sampling_rate;
+static void cs_check_cpu(int cpu, unsigned int load)
+{
+	struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
+	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+	struct dbs_data *dbs_data = policy->governor_data;
+	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 
-#define LATENCY_MULTIPLIER			(1000)
-#define MIN_LATENCY_MULTIPLIER			(100)
-#define DEF_SAMPLING_DOWN_FACTOR		(1)
-#define MAX_SAMPLING_DOWN_FACTOR		(10)
-#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)
-
-static void do_dbs_timer(struct work_struct *work);
-
-struct cpu_dbs_info_s {
-	cputime64_t prev_cpu_idle;
-	cputime64_t prev_cpu_wall;
-	cputime64_t prev_cpu_nice;
-	struct cpufreq_policy *cur_policy;
-	struct delayed_work work;
-	unsigned int down_skip;
-	unsigned int requested_freq;
-	int cpu;
-	unsigned int enable:1;
 	/*
-	 * percpu mutex that serializes governor limit change with
-	 * do_dbs_timer invocation. We do not want do_dbs_timer to run
-	 * when user is changing the governor or limits.
+	 * break out if we 'cannot' reduce the speed as the user might
+	 * want freq_step to be zero
 	 */
-	struct mutex timer_mutex;
-};
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);
+	if (cs_tuners->freq_step == 0)
+		return;
 
-static unsigned int dbs_enable;	/* number of CPUs using this policy */
+	/* Check for frequency increase */
+	if (load > cs_tuners->up_threshold) {
+		dbs_info->down_skip = 0;
 
-/*
- * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
- * different CPUs. It protects dbs_enable in governor start/stop.
- */
-static DEFINE_MUTEX(dbs_mutex);
-
-static struct workqueue_struct	*kconservative_wq;
-
-static struct dbs_tuners {
-	unsigned int sampling_rate;
-	unsigned int sampling_down_factor;
-	unsigned int up_threshold;
-	unsigned int down_threshold;
-	unsigned int ignore_nice;
-	unsigned int freq_step;
-} dbs_tuners_ins = {
-	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
-	.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
-	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
-	.ignore_nice = 0,
-	.freq_step = 5,
-};
+		/* if we are already at full speed then break out early */
+		if (dbs_info->requested_freq == policy->max)
+			return;
 
-static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
-							cputime64_t *wall)
-{
-	cputime64_t idle_time;
-	cputime64_t cur_wall_time;
-	cputime64_t busy_time;
+		dbs_info->requested_freq += get_freq_target(cs_tuners, policy);
 
-	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
-			kstat_cpu(cpu).cpustat.system);
+		if (dbs_info->requested_freq > policy->max)
+			dbs_info->requested_freq = policy->max;
 
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
+		__cpufreq_driver_target(policy, dbs_info->requested_freq,
+			CPUFREQ_RELATION_H);
+		return;
+	}
 
-	idle_time = cputime64_sub(cur_wall_time, busy_time);
-	if (wall)
-		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
+	/* if sampling_down_factor is active break out early */
+	if (++dbs_info->down_skip < cs_tuners->sampling_down_factor)
+		return;
+	dbs_info->down_skip = 0;
 
-	return (cputime64_t)jiffies_to_usecs(idle_time);;
-}
+	/* Check for frequency decrease */
+	if (load < cs_tuners->down_threshold) {
+		unsigned int freq_target;
+		/*
+		 * if we cannot reduce the frequency anymore, break out early
+		 */
+		if (policy->cur == policy->min)
+			return;
 
-static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
-{
-	u64 idle_time = get_cpu_idle_time_us(cpu, wall);
+		freq_target = get_freq_target(cs_tuners, policy);
+		if (dbs_info->requested_freq > freq_target)
+			dbs_info->requested_freq -= freq_target;
+		else
+			dbs_info->requested_freq = policy->min;
 
-	if (idle_time == -1ULL)
-		return get_cpu_idle_time_jiffy(cpu, wall);
+		__cpufreq_driver_target(policy, dbs_info->requested_freq,
+				CPUFREQ_RELATION_L);
+		return;
+	}
+}
 
-	return idle_time;
+static void cs_dbs_timer(struct work_struct *work)
+{
+	struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
+			struct cs_cpu_dbs_info_s, cdbs.work.work);
+	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
+	struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
+			cpu);
+	struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
+	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+	int delay = delay_for_sampling_rate(cs_tuners->sampling_rate);
+	bool modify_all = true;
+
+	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
+	if (!need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate))
+		modify_all = false;
+	else
+		dbs_check_cpu(dbs_data, cpu);
+
+	gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
+	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
 }
 
-/* keep track of frequency transitions */
-static int
-dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
-		     void *data)
+static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+		void *data)
 {
 	struct cpufreq_freqs *freq = data;
-	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info,
-							freq->cpu);
-
+	struct cs_cpu_dbs_info_s *dbs_info =
+					&per_cpu(cs_cpu_dbs_info, freq->cpu);
 	struct cpufreq_policy *policy;
 
-	if (!this_dbs_info->enable)
+	if (!dbs_info->enable)
 		return 0;
 
-	policy = this_dbs_info->cur_policy;
+	policy = dbs_info->cdbs.cur_policy;
 
 	/*
-	 * we only care if our internally tracked freq moves outside
-	 * the 'valid' ranges of freqency available to us otherwise
-	 * we do not change it
+	 * we only care if our internally tracked freq moves outside the 'valid'
+	 * ranges of frequency available to us otherwise we do not change it
 	*/
-	if (this_dbs_info->requested_freq > policy->max
-			|| this_dbs_info->requested_freq < policy->min)
-		this_dbs_info->requested_freq = freq->new;
+	if (dbs_info->requested_freq > policy->max
+			|| dbs_info->requested_freq < policy->min)
+		dbs_info->requested_freq = freq->new;
 
 	return 0;
 }
 
-static struct notifier_block dbs_cpufreq_notifier_block = {
-	.notifier_call = dbs_cpufreq_notifier
-};
-
 /************************** sysfs interface ************************/
-static ssize_t show_sampling_rate_max(struct kobject *kobj,
-				      struct attribute *attr, char *buf)
-{
-	printk_once(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
-		    "sysfs file is deprecated - used by: %s\n", current->comm);
-	return sprintf(buf, "%u\n", -1U);
-}
+static struct common_dbs_data cs_dbs_cdata;
 
-static ssize_t show_sampling_rate_min(struct kobject *kobj,
-				      struct attribute *attr, char *buf)
-{
-	return sprintf(buf, "%u\n", min_sampling_rate);
-}
-
-define_one_global_ro(sampling_rate_max);
-define_one_global_ro(sampling_rate_min);
-
-/* cpufreq_conservative Governor Tunables */
-#define show_one(file_name, object)					\
-static ssize_t show_##file_name						\
-(struct kobject *kobj, struct attribute *attr, char *buf)		\
-{									\
-	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
-}
-show_one(sampling_rate, sampling_rate);
-show_one(sampling_down_factor, sampling_down_factor);
-show_one(up_threshold, up_threshold);
-show_one(down_threshold, down_threshold);
-show_one(ignore_nice_load, ignore_nice);
-show_one(freq_step, freq_step);
-
-/*** delete after deprecation time ***/
-#define DEPRECATION_MSG(file_name)					\
-	printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs "	\
-		"interface is deprecated - " #file_name "\n");
-
-#define show_one_old(file_name)						\
-static ssize_t show_##file_name##_old					\
-(struct cpufreq_policy *unused, char *buf)				\
-{									\
-	printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs "	\
-		"interface is deprecated - " #file_name "\n");		\
-	return show_##file_name(NULL, NULL, buf);			\
-}
-show_one_old(sampling_rate);
-show_one_old(sampling_down_factor);
-show_one_old(up_threshold);
-show_one_old(down_threshold);
-show_one_old(ignore_nice_load);
-show_one_old(freq_step);
-show_one_old(sampling_rate_min);
-show_one_old(sampling_rate_max);
-
-cpufreq_freq_attr_ro_old(sampling_rate_min);
-cpufreq_freq_attr_ro_old(sampling_rate_max);
-
-/*** delete after deprecation time ***/
-
-static ssize_t store_sampling_down_factor(struct kobject *a,
-					  struct attribute *b,
-					  const char *buf, size_t count)
+static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
+		const char *buf, size_t count)
 {
+	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
@@ -233,16 +162,14 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
 	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
 		return -EINVAL;
 
-	mutex_lock(&dbs_mutex);
-	dbs_tuners_ins.sampling_down_factor = input;
-	mutex_unlock(&dbs_mutex);
-
+	cs_tuners->sampling_down_factor = input;
 	return count;
 }
 
-static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
-				   const char *buf, size_t count)
+static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
+		size_t count)
 {
+	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
@@ -250,62 +177,49 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
 	if (ret != 1)
 		return -EINVAL;
 
-	mutex_lock(&dbs_mutex);
-	dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
-	mutex_unlock(&dbs_mutex);
-
+	cs_tuners->sampling_rate = max(input, dbs_data->min_sampling_rate);
 	return count;
 }
 
-static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
-				  const char *buf, size_t count)
+static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
+		size_t count)
 {
+	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 
-	mutex_lock(&dbs_mutex);
-	if (ret != 1 || input > 100 ||
-			input <= dbs_tuners_ins.down_threshold) {
-		mutex_unlock(&dbs_mutex);
+	if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
 		return -EINVAL;
-	}
-
-	dbs_tuners_ins.up_threshold = input;
-	mutex_unlock(&dbs_mutex);
 
+	cs_tuners->up_threshold = input;
 	return count;
 }
 
-static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
-				    const char *buf, size_t count)
+static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
+		size_t count)
 {
+	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 
-	mutex_lock(&dbs_mutex);
 	/* cannot be lower than 11 otherwise freq will not fall */
 	if (ret != 1 || input < 11 || input > 100 ||
-			input >= dbs_tuners_ins.up_threshold) {
-		mutex_unlock(&dbs_mutex);
+			input >= cs_tuners->up_threshold)
 		return -EINVAL;
-	}
-
-	dbs_tuners_ins.down_threshold = input;
-	mutex_unlock(&dbs_mutex);
 
+	cs_tuners->down_threshold = input;
 	return count;
 }
 
-static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
-				      const char *buf, size_t count)
+static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
		const char *buf, size_t count)
 {
-	unsigned int input;
+	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+	unsigned int input, j;
 	int ret;
 
-	unsigned int j;
-
 	ret = sscanf(buf, "%u", &input);
 	if (ret != 1)
 		return -EINVAL;
@@ -313,30 +227,28 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
 	if (input > 1)
 		input = 1;
 
-	mutex_lock(&dbs_mutex);
-	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
-		mutex_unlock(&dbs_mutex);
+	if (input == cs_tuners->ignore_nice_load) /* nothing to do */
 		return count;
-	}
-	dbs_tuners_ins.ignore_nice = input;
+
+	cs_tuners->ignore_nice_load = input;
 
 	/* we need to re-evaluate prev_cpu_idle */
 	for_each_online_cpu(j) {
-		struct cpu_dbs_info_s *dbs_info;
+		struct cs_cpu_dbs_info_s *dbs_info;
 		dbs_info = &per_cpu(cs_cpu_dbs_info, j);
-		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
-						&dbs_info->prev_cpu_wall);
-		if (dbs_tuners_ins.ignore_nice)
-			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
+					&dbs_info->cdbs.prev_cpu_wall, 0);
+		if (cs_tuners->ignore_nice_load)
+			dbs_info->cdbs.prev_cpu_nice =
+				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 	}
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
-static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
-			       const char *buf, size_t count)
+static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf,
+		size_t count)
 {
+	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
@@ -347,361 +259,119 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
 	if (input > 100)
 		input = 100;
 
-	/* no need to test here if freq_step is zero as the user might actually
-	 * want this, they would be crazy though :) */
-	mutex_lock(&dbs_mutex);
-	dbs_tuners_ins.freq_step = input;
-	mutex_unlock(&dbs_mutex);
-
+	/*
+	 * no need to test here if freq_step is zero as the user might actually
+	 * want this, they would be crazy though :)
+	 */
+	cs_tuners->freq_step = input;
 	return count;
 }
 
-define_one_global_rw(sampling_rate);
-define_one_global_rw(sampling_down_factor);
-define_one_global_rw(up_threshold);
-define_one_global_rw(down_threshold);
-define_one_global_rw(ignore_nice_load);
-define_one_global_rw(freq_step);
-
-static struct attribute *dbs_attributes[] = {
-	&sampling_rate_max.attr,
-	&sampling_rate_min.attr,
-	&sampling_rate.attr,
-	&sampling_down_factor.attr,
-	&up_threshold.attr,
-	&down_threshold.attr,
-	&ignore_nice_load.attr,
-	&freq_step.attr,
+show_store_one(cs, sampling_rate);
+show_store_one(cs, sampling_down_factor);
+show_store_one(cs, up_threshold);
+show_store_one(cs, down_threshold);
+show_store_one(cs, ignore_nice_load);
+show_store_one(cs, freq_step);
+declare_show_sampling_rate_min(cs);
+
+gov_sys_pol_attr_rw(sampling_rate);
+gov_sys_pol_attr_rw(sampling_down_factor);
+gov_sys_pol_attr_rw(up_threshold);
+gov_sys_pol_attr_rw(down_threshold);
+gov_sys_pol_attr_rw(ignore_nice_load);
+gov_sys_pol_attr_rw(freq_step);
+gov_sys_pol_attr_ro(sampling_rate_min);
+
+static struct attribute *dbs_attributes_gov_sys[] = {
+	&sampling_rate_min_gov_sys.attr,
+	&sampling_rate_gov_sys.attr,
+	&sampling_down_factor_gov_sys.attr,
+	&up_threshold_gov_sys.attr,
+	&down_threshold_gov_sys.attr,
+	&ignore_nice_load_gov_sys.attr,
+	&freq_step_gov_sys.attr,
 	NULL
 };
 
-static struct attribute_group dbs_attr_group = {
-	.attrs = dbs_attributes,
+static struct attribute_group cs_attr_group_gov_sys = {
+	.attrs = dbs_attributes_gov_sys,
 	.name = "conservative",
 };
 
-/*** delete after deprecation time ***/
-
-#define write_one_old(file_name)					\
-static ssize_t store_##file_name##_old					\
-(struct cpufreq_policy *unused, const char *buf, size_t count)		\
-{									\
-	printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs "	\
-		"interface is deprecated - " #file_name "\n");	\
-	return store_##file_name(NULL, NULL, buf, count);		\
-}
-write_one_old(sampling_rate);
-write_one_old(sampling_down_factor);
-write_one_old(up_threshold);
-write_one_old(down_threshold);
-write_one_old(ignore_nice_load);
-write_one_old(freq_step);
-
-cpufreq_freq_attr_rw_old(sampling_rate);
-cpufreq_freq_attr_rw_old(sampling_down_factor);
-cpufreq_freq_attr_rw_old(up_threshold);
-cpufreq_freq_attr_rw_old(down_threshold);
-cpufreq_freq_attr_rw_old(ignore_nice_load);
-cpufreq_freq_attr_rw_old(freq_step);
-
-static struct attribute *dbs_attributes_old[] = {
-	&sampling_rate_max_old.attr,
-	&sampling_rate_min_old.attr,
-	&sampling_rate_old.attr,
-	&sampling_down_factor_old.attr,
-	&up_threshold_old.attr,
-	&down_threshold_old.attr,
-	&ignore_nice_load_old.attr,
-	&freq_step_old.attr,
+static struct attribute *dbs_attributes_gov_pol[] = {
+	&sampling_rate_min_gov_pol.attr,
+	&sampling_rate_gov_pol.attr,
+	&sampling_down_factor_gov_pol.attr,
+	&up_threshold_gov_pol.attr,
+	&down_threshold_gov_pol.attr,
+	&ignore_nice_load_gov_pol.attr,
+	&freq_step_gov_pol.attr,
 	NULL
 };
 
-static struct attribute_group dbs_attr_group_old = {
-	.attrs = dbs_attributes_old,
+static struct attribute_group cs_attr_group_gov_pol = {
+	.attrs = dbs_attributes_gov_pol,
 	.name = "conservative",
 };
 
-/*** delete after deprecation time ***/
-
 /************************** sysfs end ************************/
 
-static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
+static int cs_init(struct dbs_data *dbs_data)
 {
-	unsigned int load = 0;
-	unsigned int max_load = 0;
-	unsigned int freq_target;
+	struct cs_dbs_tuners *tuners;
 
-	struct cpufreq_policy *policy;
-	unsigned int j;
-
-	policy = this_dbs_info->cur_policy;
-
-	/*
-	 * Every sampling_rate, we check, if current idle time is less
-	 * than 20% (default), then we try to increase frequency
-	 * Every sampling_rate*sampling_down_factor, we check, if current
-	 * idle time is more than 80%, then we try to decrease frequency
-	 *
-	 * Any frequency increase takes it to the maximum frequency.
-	 * Frequency reduction happens at minimum steps of
-	 * 5% (default) of maximum frequency
-	 */
-
-	/* Get Absolute Load */
-	for_each_cpu(j, policy->cpus) {
-		struct cpu_dbs_info_s *j_dbs_info;
-		cputime64_t cur_wall_time, cur_idle_time;
-		unsigned int idle_time, wall_time;
-
-		j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
-
-		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
-
-		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
-				j_dbs_info->prev_cpu_wall);
-		j_dbs_info->prev_cpu_wall = cur_wall_time;
-
-		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
-				j_dbs_info->prev_cpu_idle);
-		j_dbs_info->prev_cpu_idle = cur_idle_time;
-
-		if (dbs_tuners_ins.ignore_nice) {
-			cputime64_t cur_nice;
-			unsigned long cur_nice_jiffies;
-
-			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
-					 j_dbs_info->prev_cpu_nice);
-			/*
-			 * Assumption: nice time between sampling periods will
-			 * be less than 2^32 jiffies for 32 bit sys
-			 */
-			cur_nice_jiffies = (unsigned long)
-					cputime64_to_jiffies64(cur_nice);
-
-			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
-			idle_time += jiffies_to_usecs(cur_nice_jiffies);
-		}
-
-		if (unlikely(!wall_time || wall_time < idle_time))
-			continue;
-
-		load = 100 * (wall_time - idle_time) / wall_time;
-
-		if (load > max_load)
-			max_load = load;
+	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
+	if (!tuners) {
+		pr_err("%s: kzalloc failed\n", __func__);
+		return -ENOMEM;
 	}
 
-	/*
-	 * break out if we 'cannot' reduce the speed as the user might
-	 * want freq_step to be zero
-	 */
-	if (dbs_tuners_ins.freq_step == 0)
-		return;
-
-	/* Check for frequency increase */
-	if (max_load > dbs_tuners_ins.up_threshold) {
-		this_dbs_info->down_skip = 0;
-
-		/* if we are already at full speed then break out early */
-		if (this_dbs_info->requested_freq == policy->max)
-			return;
-
-		freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
-
-		/* max freq cannot be less than 100. But who knows.... */
-		if (unlikely(freq_target == 0))
-			freq_target = 5;
-
-		this_dbs_info->requested_freq += freq_target;
-		if (this_dbs_info->requested_freq > policy->max)
-			this_dbs_info->requested_freq = policy->max;
-
-		__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
-			CPUFREQ_RELATION_H);
-		return;
-	}
-
-	/*
-	 * The optimal frequency is the frequency that is the lowest that
-	 * can support the current CPU usage without triggering the up
-	 * policy. To be safe, we focus 10 points under the threshold.
-	 */
-	if (max_load < (dbs_tuners_ins.down_threshold - 10)) {
-		freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
-
-		this_dbs_info->requested_freq -= freq_target;
-		if (this_dbs_info->requested_freq < policy->min)
-			this_dbs_info->requested_freq = policy->min;
+	tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
+	tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
+	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
+	tuners->ignore_nice_load = 0;
+	tuners->freq_step = DEF_FREQUENCY_STEP;
 
-		/*
-		 * if we cannot reduce the frequency anymore, break out early
-		 */
-		if (policy->cur == policy->min)
-			return;
-
-		__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
-				CPUFREQ_RELATION_H);
-		return;
-	}
+	dbs_data->tuners = tuners;
+	dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
+		jiffies_to_usecs(10);
+	mutex_init(&dbs_data->mutex);
+	return 0;
 }
 
-static void do_dbs_timer(struct work_struct *work)
+static void cs_exit(struct dbs_data *dbs_data)
 {
-	struct cpu_dbs_info_s *dbs_info =
-		container_of(work, struct cpu_dbs_info_s, work.work);
-	unsigned int cpu = dbs_info->cpu;
-
-	/* We want all CPUs to do sampling nearly on same jiffy */
-	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
-
-	delay -= jiffies % delay;
-
-	mutex_lock(&dbs_info->timer_mutex);
+	kfree(dbs_data->tuners);
+}
 
-	dbs_check_cpu(dbs_info);
+define_get_cpu_dbs_routines(cs_cpu_dbs_info);
 
-	queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay);
-	mutex_unlock(&dbs_info->timer_mutex);
-}
+static struct notifier_block cs_cpufreq_notifier_block = {
+	.notifier_call = dbs_cpufreq_notifier,
+};
 
-static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
-{
-	/* We want all CPUs to do sampling nearly on same jiffy */
-	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
-	delay -= jiffies % delay;
-
-	dbs_info->enable = 1;
-	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
-	queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work,
-				delay);
-}
+static struct cs_ops cs_ops = {
	.notifier_block = &cs_cpufreq_notifier_block,
+};
 
-static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
-{
-	dbs_info->enable = 0;
-	cancel_delayed_work_sync(&dbs_info->work);
-}
+static struct common_dbs_data cs_dbs_cdata = {
+	.governor = GOV_CONSERVATIVE,
+	.attr_group_gov_sys = &cs_attr_group_gov_sys,
+	.attr_group_gov_pol = &cs_attr_group_gov_pol,
+	.get_cpu_cdbs = get_cpu_cdbs,
+	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
+	.gov_dbs_timer = cs_dbs_timer,
+	.gov_check_cpu = cs_check_cpu,
+	.gov_ops = &cs_ops,
+	.init = cs_init,
+	.exit = cs_exit,
+};
 
-static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
 				   unsigned int event)
 {
-	unsigned int cpu = policy->cpu;
-	struct cpu_dbs_info_s *this_dbs_info;
-	unsigned int j;
-	int rc;
-
-	this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
-
-	switch (event) {
-	case CPUFREQ_GOV_START:
-		if ((!cpu_online(cpu)) || (!policy->cur))
-			return -EINVAL;
-
-		mutex_lock(&dbs_mutex);
-
-		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
-		if (rc) {
-			mutex_unlock(&dbs_mutex);
-			return rc;
-		}
-
-		for_each_cpu(j, policy->cpus) {
-			struct cpu_dbs_info_s *j_dbs_info;
-			j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
-			j_dbs_info->cur_policy = policy;
-
-			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
-						&j_dbs_info->prev_cpu_wall);
-			if (dbs_tuners_ins.ignore_nice) {
-				j_dbs_info->prev_cpu_nice =
-						kstat_cpu(j).cpustat.nice;
-			}
-		}
-		this_dbs_info->down_skip = 0;
-		this_dbs_info->requested_freq = policy->cur;
-
-		mutex_init(&this_dbs_info->timer_mutex);
-		dbs_enable++;
-		/*
-		 * Start the timerschedule work, when this governor
-		 * is used for first time
-		 */
-		if (dbs_enable == 1) {
-			unsigned int latency;
-			/* policy latency is in nS. Convert it to uS first */
-			latency = policy->cpuinfo.transition_latency / 1000;
-			if (latency == 0)
-				latency = 1;
-
-			rc = sysfs_create_group(cpufreq_global_kobject,
-						&dbs_attr_group);
-			if (rc) {
-				mutex_unlock(&dbs_mutex);
-				return rc;
-			}
-
-			/*
-			 * conservative does not implement micro like ondemand
-			 * governor, thus we are bound to jiffes/HZ
-			 */
-			min_sampling_rate =
-				MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
-			/* Bring kernel and HW constraints together */
-			min_sampling_rate = max(min_sampling_rate,
-					MIN_LATENCY_MULTIPLIER * latency);
-			dbs_tuners_ins.sampling_rate =
-				max(min_sampling_rate,
-				    latency * LATENCY_MULTIPLIER);
-
-			cpufreq_register_notifier(
-					&dbs_cpufreq_notifier_block,
-					CPUFREQ_TRANSITION_NOTIFIER);
-		}
-		mutex_unlock(&dbs_mutex);
-
-		dbs_timer_init(this_dbs_info);
-
-		break;
-
-	case CPUFREQ_GOV_STOP:
-		dbs_timer_exit(this_dbs_info);
-
-		mutex_lock(&dbs_mutex);
-		sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
-		dbs_enable--;
-		mutex_destroy(&this_dbs_info->timer_mutex);
-
-		/*
-		 * Stop the timerschedule work, when this governor
-		 * is used for first time
-		 */
-		if (dbs_enable == 0)
-			cpufreq_unregister_notifier(
-					&dbs_cpufreq_notifier_block,
-					CPUFREQ_TRANSITION_NOTIFIER);
-
-		mutex_unlock(&dbs_mutex);
-		if (!dbs_enable)
-			sysfs_remove_group(cpufreq_global_kobject,
-					   &dbs_attr_group);
-
-		break;
-
-	case CPUFREQ_GOV_LIMITS:
-		mutex_lock(&this_dbs_info->timer_mutex);
-		if (policy->max < this_dbs_info->cur_policy->cur)
-			__cpufreq_driver_target(
-					this_dbs_info->cur_policy,
-					policy->max, CPUFREQ_RELATION_H);
-		else if (policy->min > this_dbs_info->cur_policy->cur)
-			__cpufreq_driver_target(
-					this_dbs_info->cur_policy,
-					policy->min, CPUFREQ_RELATION_L);
-		mutex_unlock(&this_dbs_info->timer_mutex);
-
-		break;
-	}
-	return 0;
+	return cpufreq_governor_dbs(policy, &cs_dbs_cdata, event);
 }
 
 #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
@@ -709,35 +379,21 @@ static
 #endif
 struct cpufreq_governor cpufreq_gov_conservative = {
 	.name			= "conservative",
-	.governor		= cpufreq_governor_dbs,
+	.governor		= cs_cpufreq_governor_dbs,
 	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
 	.owner			= THIS_MODULE,
 };
 
 static int __init cpufreq_gov_dbs_init(void)
 {
-	int err;
-
-	kconservative_wq = create_workqueue("kconservative");
-	if (!kconservative_wq) {
-		printk(KERN_ERR "Creation of kconservative failed\n");
-		return -EFAULT;
-	}
-
-	err = cpufreq_register_governor(&cpufreq_gov_conservative);
-	if (err)
-		destroy_workqueue(kconservative_wq);
-
-	return err;
+	return cpufreq_register_governor(&cpufreq_gov_conservative);
 }
 
 static void __exit cpufreq_gov_dbs_exit(void)
 {
 	cpufreq_unregister_governor(&cpufreq_gov_conservative);
-	destroy_workqueue(kconservative_wq);
 }
-
 MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
 MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
 		"Low Latency Frequency Transition capable processors "
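
Note on the retained algorithm: the refactor moves sampling and per-CPU load accounting into the shared governor code (dbs_check_cpu() in cpufreq_governor.c), so this file now keeps only the policy visible in get_freq_target() and cs_check_cpu() above. The fragment below is a minimal user-space sketch of that per-sample decision, handy for exploring how the tunables interact without building a kernel. The "struct tuners" type and the main() harness are invented for illustration; only the threshold/step arithmetic mirrors the patched code.

/*
 * User-space sketch of the conservative governor's per-sample decision.
 * Assumed/invented for illustration: struct tuners, check_cpu(), main().
 */
#include <stdio.h>

struct tuners {
	unsigned int up_threshold;		/* default 80 (% busy) */
	unsigned int down_threshold;		/* default 20 (% busy) */
	unsigned int freq_step;			/* % of max freq, default 5 */
	unsigned int sampling_down_factor;	/* default 1 */
};

/* mirrors get_freq_target(): one step is a percentage of the max frequency */
static unsigned int freq_target(const struct tuners *t, unsigned int max)
{
	unsigned int target = (t->freq_step * max) / 100;

	return target ? target : 5;	/* DEF_FREQUENCY_STEP fallback */
}

/* one sampling period; returns the new requested frequency in kHz */
static unsigned int check_cpu(const struct tuners *t, unsigned int load,
			      unsigned int requested, unsigned int min,
			      unsigned int max, unsigned int *down_skip)
{
	if (t->freq_step == 0)		/* user pinned the frequency */
		return requested;

	if (load > t->up_threshold) {	/* busy: one step up, clamped to max */
		*down_skip = 0;
		requested += freq_target(t, max);
		return requested > max ? max : requested;
	}

	/* the down path only runs every sampling_down_factor samples */
	if (++(*down_skip) < t->sampling_down_factor)
		return requested;
	*down_skip = 0;

	if (load < t->down_threshold) {	/* idle: one step down, clamped to min */
		unsigned int step = freq_target(t, max);

		requested = requested > min + step ? requested - step : min;
	}

	return requested;	/* load in between: hold the current request */
}

int main(void)
{
	struct tuners t = { 80, 20, 5, 1 };	/* the defaults set in cs_init() */
	unsigned int skip = 0, freq = 600000;	/* hypothetical 600 MHz start */
	unsigned int loads[] = { 95, 90, 50, 10, 10 };

	for (unsigned int i = 0; i < sizeof(loads) / sizeof(loads[0]); i++) {
		freq = check_cpu(&t, loads[i], freq, 200000, 1000000, &skip);
		printf("load %3u%% -> requested %7u kHz\n", loads[i], freq);
	}

	return 0;
}

As the sketch shows, the request moves by at most one freq_step per sample in either direction, which is the governor's defining "conservative" behavior (the block comment above cs_check_cpu() retains older wording about jumping straight to the maximum frequency, but the code steps gradually).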
