Diffstat (limited to 'drivers/cpufreq')
78 files changed, 4052 insertions, 4588 deletions
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 534fcb82515..ffe350f86bc 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -17,15 +17,15 @@ config CPU_FREQ if CPU_FREQ -config CPU_FREQ_TABLE - tristate - config CPU_FREQ_GOV_COMMON bool +config CPU_FREQ_BOOST_SW + bool + depends on THERMAL + config CPU_FREQ_STAT tristate "CPU frequency translation statistics" - select CPU_FREQ_TABLE default y help This driver exports CPU frequency statistics information through sysfs @@ -143,7 +143,6 @@ config CPU_FREQ_GOV_USERSPACE config CPU_FREQ_GOV_ONDEMAND tristate "'ondemand' cpufreq policy governor" - select CPU_FREQ_TABLE select CPU_FREQ_GOV_COMMON help 'ondemand' - This driver adds a dynamic cpufreq policy governor. @@ -186,8 +185,10 @@ config CPU_FREQ_GOV_CONSERVATIVE config GENERIC_CPUFREQ_CPU0 tristate "Generic CPU0 cpufreq driver" - depends on HAVE_CLK && REGULATOR && PM_OPP && OF - select CPU_FREQ_TABLE + depends on HAVE_CLK && OF + # if CPU_THERMAL is on and THERMAL=m, CPU0 cannot be =y: + depends on !CPU_THERMAL || THERMAL + select PM_OPP help This adds a generic cpufreq driver for CPU0 frequency management. It supports both uniprocessor (UP) and symmetric multiprocessor (SMP) @@ -201,7 +202,7 @@ source "drivers/cpufreq/Kconfig.x86" endmenu menu "ARM CPU frequency scaling drivers" -depends on ARM +depends on ARM || ARM64 source "drivers/cpufreq/Kconfig.arm" endmenu @@ -223,7 +224,6 @@ depends on IA64 config IA64_ACPI_CPUFREQ tristate "ACPI Processor P-States driver" - select CPU_FREQ_TABLE depends on ACPI_PROCESSOR help This driver adds a CPUFreq driver which utilizes the ACPI @@ -240,7 +240,6 @@ depends on MIPS config LOONGSON2_CPUFREQ tristate "Loongson2 CPUFreq Driver" - select CPU_FREQ_TABLE help This option adds a CPUFreq driver for loongson processors which support software configurable cpu frequency. @@ -262,7 +261,6 @@ menu "SPARC CPU frequency scaling drivers" depends on SPARC64 config SPARC_US3_CPUFREQ tristate "UltraSPARC-III CPU Frequency driver" - select CPU_FREQ_TABLE help This adds the CPUFreq driver for UltraSPARC-III processors. @@ -272,7 +270,6 @@ config SPARC_US3_CPUFREQ config SPARC_US2E_CPUFREQ tristate "UltraSPARC-IIe CPU Frequency driver" - select CPU_FREQ_TABLE help This adds the CPUFreq driver for UltraSPARC-IIe processors. @@ -285,7 +282,6 @@ menu "SH CPU Frequency scaling" depends on SUPERH config SH_CPU_FREQ tristate "SuperH CPU Frequency driver" - select CPU_FREQ_TABLE help This adds the cpufreq driver for SuperH. Any CPU that supports clock rate rounding through the clock framework can use this diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 0fa204b244b..7364a538e05 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -2,10 +2,11 @@ # ARM CPU Frequency scaling drivers # +# big LITTLE core layer and glue drivers config ARM_BIG_LITTLE_CPUFREQ tristate "Generic ARM big LITTLE CPUfreq driver" - depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK - select CPU_FREQ_TABLE + depends on ARM && BIG_LITTLE && ARM_CPU_TOPOLOGY && HAVE_CLK + select PM_OPP help This enables the Generic CPUfreq driver for ARM big.LITTLE platforms. @@ -16,9 +17,16 @@ config ARM_DT_BL_CPUFREQ This enables probing via DT for Generic CPUfreq driver for ARM big.LITTLE platform. This gets frequency tables from DT. 
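[Editorial sketch] The ARM_BIG_LITTLE_CPUFREQ entry above is the core layer; glue drivers such as arm_big_little_dt.c (and the Versatile Express SPC driver added just below) only fill in a struct cpufreq_arm_bL_ops and register it. A minimal sketch of that contract, assuming the post-series API from arm_big_little.h as changed later in this diff; all my_* names are illustrative, not from the patch:

	#include <linux/device.h>
	#include <linux/module.h>
	#include <linux/pm_opp.h>
	#include "arm_big_little.h"

	/* populate this CPU's OPPs, e.g. from DT */
	static int my_init_opp_table(struct device *cpu_dev)
	{
		return of_init_opp_table(cpu_dev);
	}

	static struct cpufreq_arm_bL_ops my_bL_ops = {
		.name		= "my-bl-glue",
		.init_opp_table	= my_init_opp_table,
	};

	static int __init my_bL_init(void)
	{
		return bL_cpufreq_register(&my_bL_ops);
	}
	module_init(my_bL_init);

	static void __exit my_bL_exit(void)
	{
		bL_cpufreq_unregister(&my_bL_ops);
	}
	module_exit(my_bL_exit);
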
+config ARM_VEXPRESS_SPC_CPUFREQ + tristate "Versatile Express SPC based CPUfreq driver" + depends on ARM_BIG_LITTLE_CPUFREQ && ARCH_VEXPRESS_SPC + help + This add the CPUfreq driver support for Versatile Express + big.LITTLE platforms using SPC for power management. + + config ARM_EXYNOS_CPUFREQ bool - select CPU_FREQ_TABLE config ARM_EXYNOS4210_CPUFREQ bool "SAMSUNG EXYNOS4210" @@ -33,7 +41,7 @@ config ARM_EXYNOS4210_CPUFREQ config ARM_EXYNOS4X12_CPUFREQ bool "SAMSUNG EXYNOS4x12" - depends on (SOC_EXYNOS4212 || SOC_EXYNOS4412) + depends on SOC_EXYNOS4212 || SOC_EXYNOS4412 default y select ARM_EXYNOS_CPUFREQ help @@ -56,9 +64,9 @@ config ARM_EXYNOS5250_CPUFREQ config ARM_EXYNOS5440_CPUFREQ bool "SAMSUNG EXYNOS5440" depends on SOC_EXYNOS5440 - depends on HAVE_CLK && PM_OPP && OF + depends on HAVE_CLK && OF + select PM_OPP default y - select CPU_FREQ_TABLE help This adds the CPUFreq driver for Samsung EXYNOS5440 SoC. The nature of exynos5440 clock controller is @@ -67,13 +75,24 @@ config ARM_EXYNOS5440_CPUFREQ If in doubt, say N. +config ARM_EXYNOS_CPU_FREQ_BOOST_SW + bool "EXYNOS Frequency Overclocking - Software" + depends on ARM_EXYNOS_CPUFREQ + select CPU_FREQ_BOOST_SW + select EXYNOS_THERMAL + help + This driver supports software managed overclocking (BOOST). + It allows usage of special frequencies for Samsung Exynos + processors if thermal conditions are appropriate. + + It requires, for safe operation, thermal framework with properly + defined trip points. + + If in doubt, say N. + config ARM_HIGHBANK_CPUFREQ tristate "Calxeda Highbank-based" - depends on ARCH_HIGHBANK - select GENERIC_CPUFREQ_CPU0 - select PM_OPP - select REGULATOR - + depends on ARCH_HIGHBANK && GENERIC_CPUFREQ_CPU0 && REGULATOR default m help This adds the CPUFreq driver for Calxeda Highbank SoC @@ -82,12 +101,12 @@ config ARM_HIGHBANK_CPUFREQ If in doubt, say N. config ARM_IMX6Q_CPUFREQ - tristate "Freescale i.MX6Q cpufreq support" - depends on SOC_IMX6Q + tristate "Freescale i.MX6 cpufreq support" + depends on ARCH_MXC depends on REGULATOR_ANATOP - select CPU_FREQ_TABLE + select PM_OPP help - This adds cpufreq driver support for Freescale i.MX6Q SOC. + This adds cpufreq driver support for Freescale i.MX6 series SoCs. If in doubt, say N. @@ -100,8 +119,7 @@ config ARM_INTEGRATOR If in doubt, say Y. config ARM_KIRKWOOD_CPUFREQ - def_bool ARCH_KIRKWOOD && OF - select CPU_FREQ_TABLE + def_bool ARCH_KIRKWOOD || MACH_KIRKWOOD help This adds the CPUFreq driver for Marvell Kirkwood SoCs. @@ -110,7 +128,6 @@ config ARM_OMAP2PLUS_CPUFREQ bool "TI OMAP2+" depends on ARCH_OMAP2PLUS default ARCH_OMAP2PLUS - select CPU_FREQ_TABLE config ARM_S3C_CPUFREQ bool @@ -165,13 +182,12 @@ config ARM_S3C2412_CPUFREQ config ARM_S3C2416_CPUFREQ bool "S3C2416 CPU Frequency scaling support" depends on CPU_S3C2416 - select CPU_FREQ_TABLE help This adds the CPUFreq driver for the Samsung S3C2416 and S3C2450 SoC. The S3C2416 supports changing the rate of the armdiv clock source and also entering a so called dynamic voltage scaling mode in which it is possible to reduce the - core voltage of the cpu. + core voltage of the CPU. If in doubt, say N. @@ -196,7 +212,6 @@ config ARM_S3C2440_CPUFREQ config ARM_S3C64XX_CPUFREQ bool "Samsung S3C64XX" depends on CPU_S3C6410 - select CPU_FREQ_TABLE default y help This adds the CPUFreq driver for Samsung S3C6410 SoC. 
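[Editorial sketch] CPU_FREQ_BOOST_SW and its Exynos user above plug into a common boost interface that this series moves onto struct cpufreq_driver: a driver sets boost_supported/boost_enabled and provides a set_boost() callback, as the acpi-cpufreq hunks further down show (.set_boost = _store_boost, with int (*)(int)). A minimal sketch with illustrative my_* names:

	#include <linux/cpufreq.h>

	/* flip the platform's boost knob for all CPUs */
	static int my_set_boost(int state)
	{
		pr_debug("boost %sabled\n", state ? "en" : "dis");
		return 0;
	}

	static struct cpufreq_driver my_boost_driver = {
		.name		= "my-cpufreq",
		.set_boost	= my_set_boost,
		/* boost_supported / boost_enabled are filled in at init
		 * time, cf. acpi_cpufreq_boost_init() below */
	};
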
@@ -206,7 +221,6 @@ config ARM_S3C64XX_CPUFREQ config ARM_S5PV210_CPUFREQ bool "Samsung S5PV210 and S5PC110" depends on CPU_S5PV210 - select CPU_FREQ_TABLE default y help This adds the CPUFreq driver for Samsung S5PV210 and @@ -223,7 +237,6 @@ config ARM_SA1110_CPUFREQ config ARM_SPEAR_CPUFREQ bool "SPEAr CPUFreq support" depends on PLAT_SPEAR - select CPU_FREQ_TABLE default y help This adds the CPUFreq driver support for SPEAr SOCs. @@ -231,7 +244,6 @@ config ARM_SPEAR_CPUFREQ config ARM_TEGRA_CPUFREQ bool "TEGRA CPUFreq support" depends on ARCH_TEGRA - select CPU_FREQ_TABLE default y help This adds the CPUFreq driver support for TEGRA SOCs. diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc index 25ca9db62e0..72564b701b4 100644 --- a/drivers/cpufreq/Kconfig.powerpc +++ b/drivers/cpufreq/Kconfig.powerpc @@ -1,7 +1,6 @@ config CPU_FREQ_CBE tristate "CBE frequency scaling" depends on CBE_RAS && PPC_CELL - select CPU_FREQ_TABLE default m help This adds the cpufreq driver for Cell BE processors. @@ -20,7 +19,6 @@ config CPU_FREQ_CBE_PMI config CPU_FREQ_MAPLE bool "Support for Maple 970FX Evaluation Board" depends on PPC_MAPLE - select CPU_FREQ_TABLE help This adds support for frequency switching on Maple 970FX Evaluation Board and compatible boards (IBM JS2x blades). @@ -28,7 +26,6 @@ config CPU_FREQ_MAPLE config PPC_CORENET_CPUFREQ tristate "CPU frequency scaling driver for Freescale E500MC SoCs" depends on PPC_E500MC && OF && COMMON_CLK - select CPU_FREQ_TABLE select CLK_PPC_CORENET help This adds the CPUFreq driver support for Freescale e500mc, @@ -38,7 +35,6 @@ config PPC_CORENET_CPUFREQ config CPU_FREQ_PMAC bool "Support for Apple PowerBooks" depends on ADB_PMU && PPC32 - select CPU_FREQ_TABLE help This adds support for frequency switching on Apple PowerBooks, this currently includes some models of iBook & Titanium @@ -47,7 +43,6 @@ config CPU_FREQ_PMAC config CPU_FREQ_PMAC64 bool "Support for some Apple G5s" depends on PPC_PMAC && PPC64 - select CPU_FREQ_TABLE help This adds support for frequency switching on Apple iMac G5, and some of the more recent desktop G5 machines as well. @@ -55,8 +50,15 @@ config CPU_FREQ_PMAC64 config PPC_PASEMI_CPUFREQ bool "Support for PA Semi PWRficient" depends on PPC_PASEMI - select CPU_FREQ_TABLE default y help This adds the support for frequency switching on PA Semi PWRficient processors. + +config POWERNV_CPUFREQ + tristate "CPU frequency scaling for IBM POWERNV platform" + depends on PPC_POWERNV + default y + help + This adds support for CPU frequency switching on IBM POWERNV + platform diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86 index e2b6eabef22..89ae88f9189 100644 --- a/drivers/cpufreq/Kconfig.x86 +++ b/drivers/cpufreq/Kconfig.x86 @@ -10,7 +10,7 @@ config X86_INTEL_PSTATE The driver implements an internal governor and will become the scaling driver and governor for Sandy bridge processors. - When this driver is enabled it will become the perferred + When this driver is enabled it will become the preferred scaling driver for Sandy bridge processors. If in doubt, say N. @@ -31,7 +31,6 @@ config X86_PCC_CPUFREQ config X86_ACPI_CPUFREQ tristate "ACPI Processor P-States driver" - select CPU_FREQ_TABLE depends on ACPI_PROCESSOR help This driver adds a CPUFreq driver which utilizes the ACPI @@ -53,14 +52,13 @@ config X86_ACPI_CPUFREQ_CPB help The powernow-k8 driver used to provide a sysfs knob called "cpb" to disable the Core Performance Boosting feature of AMD CPUs. 
This - file has now been superseeded by the more generic "boost" entry. + file has now been superseded by the more generic "boost" entry. By enabling this option the acpi_cpufreq driver provides the old entry in addition to the new boost ones, for compatibility reasons. config ELAN_CPUFREQ tristate "AMD Elan SC400 and SC410" - select CPU_FREQ_TABLE depends on MELAN ---help--- This adds the CPUFreq driver for AMD Elan SC400 and SC410 @@ -76,7 +74,6 @@ config ELAN_CPUFREQ config SC520_CPUFREQ tristate "AMD Elan SC520" - select CPU_FREQ_TABLE depends on MELAN ---help--- This adds the CPUFreq driver for AMD Elan SC520 processor. @@ -88,7 +85,6 @@ config SC520_CPUFREQ config X86_POWERNOW_K6 tristate "AMD Mobile K6-2/K6-3 PowerNow!" - select CPU_FREQ_TABLE depends on X86_32 help This adds the CPUFreq driver for mobile AMD K6-2+ and mobile @@ -100,7 +96,6 @@ config X86_POWERNOW_K6 config X86_POWERNOW_K7 tristate "AMD Mobile Athlon/Duron PowerNow!" - select CPU_FREQ_TABLE depends on X86_32 help This adds the CPUFreq driver for mobile AMD K7 mobile processors. @@ -118,7 +113,6 @@ config X86_POWERNOW_K7_ACPI config X86_POWERNOW_K8 tristate "AMD Opteron/Athlon64 PowerNow!" - select CPU_FREQ_TABLE depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ help This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors. @@ -132,11 +126,10 @@ config X86_POWERNOW_K8 config X86_AMD_FREQ_SENSITIVITY tristate "AMD frequency sensitivity feedback powersave bias" depends on CPU_FREQ_GOV_ONDEMAND && X86_ACPI_CPUFREQ && CPU_SUP_AMD - select CPU_FREQ_TABLE help This adds AMD-specific powersave bias function to the ondemand governor, which allows it to make more power-conscious frequency - change decisions based on feedback from hardware (availble on AMD + change decisions based on feedback from hardware (available on AMD Family 16h and above). Hardware feedback tells software how "sensitive" to frequency changes @@ -160,7 +153,6 @@ config X86_GX_SUSPMOD config X86_SPEEDSTEP_CENTRINO tristate "Intel Enhanced SpeedStep (deprecated)" - select CPU_FREQ_TABLE select X86_SPEEDSTEP_CENTRINO_TABLE if X86_32 depends on X86_32 || (X86_64 && ACPI_PROCESSOR) help @@ -190,7 +182,6 @@ config X86_SPEEDSTEP_CENTRINO_TABLE config X86_SPEEDSTEP_ICH tristate "Intel Speedstep on ICH-M chipsets (ioport interface)" - select CPU_FREQ_TABLE depends on X86_32 help This adds the CPUFreq driver for certain mobile Intel Pentium III @@ -204,7 +195,6 @@ config X86_SPEEDSTEP_ICH config X86_SPEEDSTEP_SMI tristate "Intel SpeedStep on 440BX/ZX/MX chipsets (SMI interface)" - select CPU_FREQ_TABLE depends on X86_32 help This adds the CPUFreq driver for certain mobile Intel Pentium III @@ -217,7 +207,6 @@ config X86_SPEEDSTEP_SMI config X86_P4_CLOCKMOD tristate "Intel Pentium 4 clock modulation" - select CPU_FREQ_TABLE help This adds the CPUFreq driver for Intel Pentium 4 / XEON processors. When enabled it will lower CPU temperature by skipping @@ -259,7 +248,6 @@ config X86_LONGRUN config X86_LONGHAUL tristate "VIA Cyrix III Longhaul" - select CPU_FREQ_TABLE depends on X86_32 && ACPI_PROCESSOR help This adds the CPUFreq driver for VIA Samuel/CyrixIII, @@ -272,7 +260,6 @@ config X86_LONGHAUL config X86_E_POWERSAVER tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)" - select CPU_FREQ_TABLE depends on X86_32 && ACPI_PROCESSOR help This adds the CPUFreq driver for VIA C7 processors. 
However, this driver diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index ad5866c2ada..db6d9a2fea4 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -1,5 +1,7 @@ # CPUfreq core -obj-$(CONFIG_CPU_FREQ) += cpufreq.o +obj-$(CONFIG_CPU_FREQ) += cpufreq.o freq_table.o +obj-$(CONFIG_PM_OPP) += cpufreq_opp.o + # CPUfreq stats obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o @@ -11,9 +13,6 @@ obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o -# CPUfreq cross-arch helpers -obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o - obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o ################################################################################## @@ -50,7 +49,7 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o # LITTLE drivers, so that it is probed last. obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o -obj-$(CONFIG_ARCH_DAVINCI_DA850) += davinci-cpufreq.o +obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o @@ -77,6 +76,7 @@ obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o obj-$(CONFIG_ARM_TEGRA_CPUFREQ) += tegra-cpufreq.o +obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o ################################################################################## # PowerPC platform drivers @@ -88,6 +88,7 @@ obj-$(CONFIG_PPC_CORENET_CPUFREQ) += ppc-corenet-cpufreq.o obj-$(CONFIG_CPU_FREQ_PMAC) += pmac32-cpufreq.o obj-$(CONFIG_CPU_FREQ_PMAC64) += pmac64-cpufreq.o obj-$(CONFIG_PPC_PASEMI_CPUFREQ) += pasemi-cpufreq.o +obj-$(CONFIG_POWERNV_CPUFREQ) += powernv-cpufreq.o ################################################################################## # Other platform drivers diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index a1260b4549d..b0c18ed8d83 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -80,7 +80,6 @@ static struct acpi_processor_performance __percpu *acpi_perf_data; static struct cpufreq_driver acpi_cpufreq_driver; static unsigned int acpi_pstate_strict; -static bool boost_enabled, boost_supported; static struct msr __percpu *msrs; static bool boost_state(unsigned int cpu) @@ -133,49 +132,16 @@ static void boost_set_msrs(bool enable, const struct cpumask *cpumask) wrmsr_on_cpus(cpumask, msr_addr, msrs); } -static ssize_t _store_boost(const char *buf, size_t count) +static int _store_boost(int val) { - int ret; - unsigned long val = 0; - - if (!boost_supported) - return -EINVAL; - - ret = kstrtoul(buf, 10, &val); - if (ret || (val > 1)) - return -EINVAL; - - if ((val && boost_enabled) || (!val && !boost_enabled)) - return count; - get_online_cpus(); - boost_set_msrs(val, cpu_online_mask); - put_online_cpus(); - - boost_enabled = val; pr_debug("Core Boosting %sabled.\n", val ? 
"en" : "dis"); - return count; -} - -static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr, - const char *buf, size_t count) -{ - return _store_boost(buf, count); -} - -static ssize_t show_global_boost(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", boost_enabled); + return 0; } -static struct global_attr global_boost = __ATTR(boost, 0644, - show_global_boost, - store_global_boost); - static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf) { struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); @@ -186,15 +152,32 @@ static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf) cpufreq_freq_attr_ro(freqdomain_cpus); #ifdef CONFIG_X86_ACPI_CPUFREQ_CPB +static ssize_t store_boost(const char *buf, size_t count) +{ + int ret; + unsigned long val = 0; + + if (!acpi_cpufreq_driver.boost_supported) + return -EINVAL; + + ret = kstrtoul(buf, 10, &val); + if (ret || (val > 1)) + return -EINVAL; + + _store_boost((int) val); + + return count; +} + static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf, size_t count) { - return _store_boost(buf, count); + return store_boost(buf, count); } static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf) { - return sprintf(buf, "%u\n", boost_enabled); + return sprintf(buf, "%u\n", acpi_cpufreq_driver.boost_enabled); } cpufreq_freq_attr_rw(cpb); @@ -230,7 +213,7 @@ static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data) static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data) { - int i; + struct cpufreq_frequency_table *pos; struct acpi_processor_performance *perf; if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) @@ -240,10 +223,9 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data) perf = data->acpi_data; - for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { - if (msr == perf->states[data->freq_table[i].driver_data].status) - return data->freq_table[i].frequency; - } + cpufreq_for_each_entry(pos, data->freq_table) + if (msr == perf->states[pos->driver_data].status) + return pos->frequency; return data->freq_table[0].frequency; } @@ -424,34 +406,21 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq, } static int acpi_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) + unsigned int index) { struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); struct acpi_processor_performance *perf; - struct cpufreq_freqs freqs; struct drv_cmd cmd; - unsigned int next_state = 0; /* Index into freq_table */ unsigned int next_perf_state = 0; /* Index into perf table */ int result = 0; - pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); - if (unlikely(data == NULL || data->acpi_data == NULL || data->freq_table == NULL)) { return -ENODEV; } perf = data->acpi_data; - result = cpufreq_frequency_table_target(policy, - data->freq_table, - target_freq, - relation, &next_state); - if (unlikely(result)) { - result = -ENODEV; - goto out; - } - - next_perf_state = data->freq_table[next_state].driver_data; + next_perf_state = data->freq_table[index].driver_data; if (perf->state == next_perf_state) { if (unlikely(data->resume)) { pr_debug("Called after resume, resetting to P%d\n", @@ -492,23 +461,17 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, else cmd.mask = cpumask_of(policy->cpu); - freqs.old = perf->states[perf->state].core_frequency * 1000; - freqs.new = 
data->freq_table[next_state].frequency; - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - drv_write(&cmd); if (acpi_pstate_strict) { - if (!check_freqs(cmd.mask, freqs.new, data)) { + if (!check_freqs(cmd.mask, data->freq_table[index].frequency, + data)) { pr_debug("acpi_cpufreq_target failed (%d)\n", policy->cpu); result = -EAGAIN; - freqs.new = freqs.old; } } - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - if (!result) perf->state = next_perf_state; @@ -516,15 +479,6 @@ out: return result; } -static int acpi_cpufreq_verify(struct cpufreq_policy *policy) -{ - struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); - - pr_debug("acpi_cpufreq_verify\n"); - - return cpufreq_frequency_table_verify(policy, data->freq_table); -} - static unsigned long acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu) { @@ -582,7 +536,7 @@ static int boost_notify(struct notifier_block *nb, unsigned long action, switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: - boost_set_msrs(boost_enabled, cpumask); + boost_set_msrs(acpi_cpufreq_driver.boost_enabled, cpumask); break; case CPU_DOWN_PREPARE: @@ -799,7 +753,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) goto err_unreg; } - data->freq_table = kmalloc(sizeof(*data->freq_table) * + data->freq_table = kzalloc(sizeof(*data->freq_table) * (perf->state_count+1), GFP_KERNEL); if (!data->freq_table) { result = -ENOMEM; @@ -837,7 +791,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END; perf->state = 0; - result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); + result = cpufreq_table_validate_and_show(policy, data->freq_table); if (result) goto err_freqfree; @@ -846,12 +800,16 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) switch (perf->control_register.space_id) { case ACPI_ADR_SPACE_SYSTEM_IO: - /* Current speed is unknown and not detectable by IO port */ + /* + * The core will not set policy->cur, because + * cpufreq_driver->get is NULL, so we need to set it here. + * However, we have to guess it, because the current speed is + * unknown and not detectable via IO ports. + */ policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu); break; case ACPI_ADR_SPACE_FIXED_HARDWARE: acpi_cpufreq_driver.get = get_cur_freq_on_cpu; - policy->cur = get_cur_freq_on_cpu(cpu); break; default: break; @@ -868,8 +826,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) (u32) perf->states[i].power, (u32) perf->states[i].transition_latency); - cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu); - /* * the first call to ->target() should result in us actually * writing something to the appropriate registers. 
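[Editorial sketch] The extract_msr() rewrite above is one instance of a pattern applied throughout this series: open-coded index loops over a driver's frequency table become cpufreq_for_each_entry() walks, which iterate until CPUFREQ_TABLE_END without an explicit counter. The shape of the pattern as a standalone sketch; my_find_freq is an illustrative helper, not from the patch:

	#include <linux/cpufreq.h>

	/* return the frequency whose driver_data matches 'data', else
	 * fall back to entry 0, mirroring extract_msr() above */
	static unsigned int my_find_freq(struct cpufreq_frequency_table *table,
					 unsigned int data)
	{
		struct cpufreq_frequency_table *pos;

		cpufreq_for_each_entry(pos, table)
			if (pos->driver_data == data)
				return pos->frequency;

		return table[0].frequency;
	}
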
@@ -898,7 +854,6 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) pr_debug("acpi_cpufreq_cpu_exit\n"); if (data) { - cpufreq_frequency_table_put_attr(policy->cpu); per_cpu(acfreq_data, policy->cpu) = NULL; acpi_processor_unregister_performance(data->acpi_data, policy->cpu); @@ -929,14 +884,15 @@ static struct freq_attr *acpi_cpufreq_attr[] = { }; static struct cpufreq_driver acpi_cpufreq_driver = { - .verify = acpi_cpufreq_verify, - .target = acpi_cpufreq_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = acpi_cpufreq_target, .bios_limit = acpi_processor_get_bios_limit, .init = acpi_cpufreq_cpu_init, .exit = acpi_cpufreq_cpu_exit, .resume = acpi_cpufreq_resume, .name = "acpi-cpufreq", .attr = acpi_cpufreq_attr, + .set_boost = _store_boost, }; static void __init acpi_cpufreq_boost_init(void) @@ -947,33 +903,23 @@ static void __init acpi_cpufreq_boost_init(void) if (!msrs) return; - boost_supported = true; - boost_enabled = boost_state(0); + acpi_cpufreq_driver.boost_supported = true; + acpi_cpufreq_driver.boost_enabled = boost_state(0); - get_online_cpus(); + cpu_notifier_register_begin(); /* Force all MSRs to the same value */ - boost_set_msrs(boost_enabled, cpu_online_mask); + boost_set_msrs(acpi_cpufreq_driver.boost_enabled, + cpu_online_mask); - register_cpu_notifier(&boost_nb); + __register_cpu_notifier(&boost_nb); - put_online_cpus(); - } else - global_boost.attr.mode = 0444; - - /* We create the boost file in any case, though for systems without - * hardware support it will be read-only and hardwired to return 0. - */ - if (cpufreq_sysfs_create_file(&(global_boost.attr))) - pr_warn(PFX "could not register global boost sysfs file\n"); - else - pr_debug("registered global boost sysfs file\n"); + cpu_notifier_register_done(); + } } -static void __exit acpi_cpufreq_boost_exit(void) +static void acpi_cpufreq_boost_exit(void) { - cpufreq_sysfs_remove_file(&(global_boost.attr)); - if (msrs) { unregister_cpu_notifier(&boost_nb); @@ -987,7 +933,11 @@ static int __init acpi_cpufreq_init(void) int ret; if (acpi_disabled) - return 0; + return -ENODEV; + + /* don't keep reloading if cpufreq_driver exists */ + if (cpufreq_get_current_driver()) + return -EEXIST; pr_debug("acpi_cpufreq_init\n"); @@ -1015,13 +965,13 @@ static int __init acpi_cpufreq_init(void) *iter = &cpb; } #endif + acpi_cpufreq_boost_init(); ret = cpufreq_register_driver(&acpi_cpufreq_driver); - if (ret) + if (ret) { free_acpi_perf_data(); - else - acpi_cpufreq_boost_init(); - + acpi_cpufreq_boost_exit(); + } return ret; } diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c index 3549f0784af..1f4d4e31505 100644 --- a/drivers/cpufreq/arm_big_little.c +++ b/drivers/cpufreq/arm_big_little.c @@ -24,110 +24,323 @@ #include <linux/cpufreq.h> #include <linux/cpumask.h> #include <linux/export.h> +#include <linux/mutex.h> #include <linux/of_platform.h> -#include <linux/opp.h> +#include <linux/pm_opp.h> #include <linux/slab.h> #include <linux/topology.h> #include <linux/types.h> +#include <asm/bL_switcher.h> #include "arm_big_little.h" /* Currently we support only two clusters */ +#define A15_CLUSTER 0 +#define A7_CLUSTER 1 #define MAX_CLUSTERS 2 +#ifdef CONFIG_BL_SWITCHER +static bool bL_switching_enabled; +#define is_bL_switching_enabled() bL_switching_enabled +#define set_switching_enabled(x) (bL_switching_enabled = (x)) +#else +#define is_bL_switching_enabled() false +#define set_switching_enabled(x) do { } while (0) +#endif + +#define ACTUAL_FREQ(cluster, freq) 
((cluster == A7_CLUSTER) ? freq << 1 : freq) +#define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq) + static struct cpufreq_arm_bL_ops *arm_bL_ops; static struct clk *clk[MAX_CLUSTERS]; -static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS]; -static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)}; +static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1]; +static atomic_t cluster_usage[MAX_CLUSTERS + 1]; + +static unsigned int clk_big_min; /* (Big) clock frequencies */ +static unsigned int clk_little_max; /* Maximum clock frequency (Little) */ + +static DEFINE_PER_CPU(unsigned int, physical_cluster); +static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq); -static unsigned int bL_cpufreq_get(unsigned int cpu) +static struct mutex cluster_lock[MAX_CLUSTERS]; + +static inline int raw_cpu_to_cluster(int cpu) { - u32 cur_cluster = cpu_to_cluster(cpu); + return topology_physical_package_id(cpu); +} - return clk_get_rate(clk[cur_cluster]) / 1000; +static inline int cpu_to_cluster(int cpu) +{ + return is_bL_switching_enabled() ? + MAX_CLUSTERS : raw_cpu_to_cluster(cpu); } -/* Validate policy frequency range */ -static int bL_cpufreq_verify_policy(struct cpufreq_policy *policy) +static unsigned int find_cluster_maxfreq(int cluster) { - u32 cur_cluster = cpu_to_cluster(policy->cpu); + int j; + u32 max_freq = 0, cpu_freq; - return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]); + for_each_online_cpu(j) { + cpu_freq = per_cpu(cpu_last_req_freq, j); + + if ((cluster == per_cpu(physical_cluster, j)) && + (max_freq < cpu_freq)) + max_freq = cpu_freq; + } + + pr_debug("%s: cluster: %d, max freq: %d\n", __func__, cluster, + max_freq); + + return max_freq; +} + +static unsigned int clk_get_cpu_rate(unsigned int cpu) +{ + u32 cur_cluster = per_cpu(physical_cluster, cpu); + u32 rate = clk_get_rate(clk[cur_cluster]) / 1000; + + /* For switcher we use virtual A7 clock rates */ + if (is_bL_switching_enabled()) + rate = VIRT_FREQ(cur_cluster, rate); + + pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu, + cur_cluster, rate); + + return rate; +} + +static unsigned int bL_cpufreq_get_rate(unsigned int cpu) +{ + if (is_bL_switching_enabled()) { + pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq, + cpu)); + + return per_cpu(cpu_last_req_freq, cpu); + } else { + return clk_get_cpu_rate(cpu); + } +} + +static unsigned int +bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate) +{ + u32 new_rate, prev_rate; + int ret; + bool bLs = is_bL_switching_enabled(); + + mutex_lock(&cluster_lock[new_cluster]); + + if (bLs) { + prev_rate = per_cpu(cpu_last_req_freq, cpu); + per_cpu(cpu_last_req_freq, cpu) = rate; + per_cpu(physical_cluster, cpu) = new_cluster; + + new_rate = find_cluster_maxfreq(new_cluster); + new_rate = ACTUAL_FREQ(new_cluster, new_rate); + } else { + new_rate = rate; + } + + pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d, freq: %d\n", + __func__, cpu, old_cluster, new_cluster, new_rate); + + ret = clk_set_rate(clk[new_cluster], new_rate * 1000); + if (WARN_ON(ret)) { + pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret, + new_cluster); + if (bLs) { + per_cpu(cpu_last_req_freq, cpu) = prev_rate; + per_cpu(physical_cluster, cpu) = old_cluster; + } + + mutex_unlock(&cluster_lock[new_cluster]); + + return ret; + } + + mutex_unlock(&cluster_lock[new_cluster]); + + /* Recalc freq for old cluster when switching clusters */ + if (old_cluster != new_cluster) { + pr_debug("%s: 
cpu: %d, old cluster: %d, new cluster: %d\n", + __func__, cpu, old_cluster, new_cluster); + + /* Switch cluster */ + bL_switch_request(cpu, new_cluster); + + mutex_lock(&cluster_lock[old_cluster]); + + /* Set freq of old cluster if there are cpus left on it */ + new_rate = find_cluster_maxfreq(old_cluster); + new_rate = ACTUAL_FREQ(old_cluster, new_rate); + + if (new_rate) { + pr_debug("%s: Updating rate of old cluster: %d, to freq: %d\n", + __func__, old_cluster, new_rate); + + if (clk_set_rate(clk[old_cluster], new_rate * 1000)) + pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n", + __func__, ret, old_cluster); + } + mutex_unlock(&cluster_lock[old_cluster]); + } + + return 0; } /* Set clock frequency */ static int bL_cpufreq_set_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) + unsigned int index) { - struct cpufreq_freqs freqs; - u32 cpu = policy->cpu, freq_tab_idx, cur_cluster; - int ret = 0; + u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster; + unsigned int freqs_new; + + cur_cluster = cpu_to_cluster(cpu); + new_cluster = actual_cluster = per_cpu(physical_cluster, cpu); + + freqs_new = freq_table[cur_cluster][index].frequency; + + if (is_bL_switching_enabled()) { + if ((actual_cluster == A15_CLUSTER) && + (freqs_new < clk_big_min)) { + new_cluster = A7_CLUSTER; + } else if ((actual_cluster == A7_CLUSTER) && + (freqs_new > clk_little_max)) { + new_cluster = A15_CLUSTER; + } + } - cur_cluster = cpu_to_cluster(policy->cpu); + return bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new); +} - freqs.old = bL_cpufreq_get(policy->cpu); +static inline u32 get_table_count(struct cpufreq_frequency_table *table) +{ + int count; - /* Determine valid target frequency using freq_table */ - cpufreq_frequency_table_target(policy, freq_table[cur_cluster], - target_freq, relation, &freq_tab_idx); - freqs.new = freq_table[cur_cluster][freq_tab_idx].frequency; + for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++) + ; - pr_debug("%s: cpu: %d, cluster: %d, oldfreq: %d, target freq: %d, new freq: %d\n", - __func__, cpu, cur_cluster, freqs.old, target_freq, - freqs.new); + return count; +} - if (freqs.old == freqs.new) - return 0; +/* get the minimum frequency in the cpufreq_frequency_table */ +static inline u32 get_table_min(struct cpufreq_frequency_table *table) +{ + struct cpufreq_frequency_table *pos; + uint32_t min_freq = ~0; + cpufreq_for_each_entry(pos, table) + if (pos->frequency < min_freq) + min_freq = pos->frequency; + return min_freq; +} - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); +/* get the maximum frequency in the cpufreq_frequency_table */ +static inline u32 get_table_max(struct cpufreq_frequency_table *table) +{ + struct cpufreq_frequency_table *pos; + uint32_t max_freq = 0; + cpufreq_for_each_entry(pos, table) + if (pos->frequency > max_freq) + max_freq = pos->frequency; + return max_freq; +} - ret = clk_set_rate(clk[cur_cluster], freqs.new * 1000); - if (ret) { - pr_err("clk_set_rate failed: %d\n", ret); - freqs.new = freqs.old; +static int merge_cluster_tables(void) +{ + int i, j, k = 0, count = 1; + struct cpufreq_frequency_table *table; + + for (i = 0; i < MAX_CLUSTERS; i++) + count += get_table_count(freq_table[i]); + + table = kzalloc(sizeof(*table) * count, GFP_KERNEL); + if (!table) + return -ENOMEM; + + freq_table[MAX_CLUSTERS] = table; + + /* Add in reverse order to get freqs in increasing order */ + for (i = MAX_CLUSTERS - 1; i >= 0; i--) { + for (j = 0; 
freq_table[i][j].frequency != CPUFREQ_TABLE_END; + j++) { + table[k].frequency = VIRT_FREQ(i, + freq_table[i][j].frequency); + pr_debug("%s: index: %d, freq: %d\n", __func__, k, + table[k].frequency); + k++; + } } - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + table[k].driver_data = k; + table[k].frequency = CPUFREQ_TABLE_END; - return ret; + pr_debug("%s: End, table: %p, count: %d\n", __func__, table, k); + + return 0; +} + +static void _put_cluster_clk_and_freq_table(struct device *cpu_dev) +{ + u32 cluster = raw_cpu_to_cluster(cpu_dev->id); + + if (!freq_table[cluster]) + return; + + clk_put(clk[cluster]); + dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]); + dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster); } static void put_cluster_clk_and_freq_table(struct device *cpu_dev) { u32 cluster = cpu_to_cluster(cpu_dev->id); + int i; - if (!atomic_dec_return(&cluster_usage[cluster])) { - clk_put(clk[cluster]); - opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]); - dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster); + if (atomic_dec_return(&cluster_usage[cluster])) + return; + + if (cluster < MAX_CLUSTERS) + return _put_cluster_clk_and_freq_table(cpu_dev); + + for_each_present_cpu(i) { + struct device *cdev = get_cpu_device(i); + if (!cdev) { + pr_err("%s: failed to get cpu%d device\n", __func__, i); + return; + } + + _put_cluster_clk_and_freq_table(cdev); } + + /* free virtual table */ + kfree(freq_table[cluster]); } -static int get_cluster_clk_and_freq_table(struct device *cpu_dev) +static int _get_cluster_clk_and_freq_table(struct device *cpu_dev) { - u32 cluster = cpu_to_cluster(cpu_dev->id); + u32 cluster = raw_cpu_to_cluster(cpu_dev->id); char name[14] = "cpu-cluster."; int ret; - if (atomic_inc_return(&cluster_usage[cluster]) != 1) + if (freq_table[cluster]) return 0; ret = arm_bL_ops->init_opp_table(cpu_dev); if (ret) { dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n", __func__, cpu_dev->id, ret); - goto atomic_dec; + goto out; } - ret = opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]); + ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]); if (ret) { dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n", __func__, cpu_dev->id, ret); - goto atomic_dec; + goto out; } name[12] = cluster + '0'; - clk[cluster] = clk_get_sys(name, NULL); + clk[cluster] = clk_get(cpu_dev, name); if (!IS_ERR(clk[cluster])) { dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n", __func__, clk[cluster], freq_table[cluster], @@ -138,15 +351,74 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev) dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n", __func__, cpu_dev->id, cluster); ret = PTR_ERR(clk[cluster]); - opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]); + dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]); -atomic_dec: - atomic_dec(&cluster_usage[cluster]); +out: dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__, cluster); return ret; } +static int get_cluster_clk_and_freq_table(struct device *cpu_dev) +{ + u32 cluster = cpu_to_cluster(cpu_dev->id); + int i, ret; + + if (atomic_inc_return(&cluster_usage[cluster]) != 1) + return 0; + + if (cluster < MAX_CLUSTERS) { + ret = _get_cluster_clk_and_freq_table(cpu_dev); + if (ret) + atomic_dec(&cluster_usage[cluster]); + return ret; + } + + /* + * Get data for all clusters and fill virtual cluster with a merge of + * both + */ + for_each_present_cpu(i) { + struct device *cdev = 
get_cpu_device(i); + if (!cdev) { + pr_err("%s: failed to get cpu%d device\n", __func__, i); + return -ENODEV; + } + + ret = _get_cluster_clk_and_freq_table(cdev); + if (ret) + goto put_clusters; + } + + ret = merge_cluster_tables(); + if (ret) + goto put_clusters; + + /* Assuming 2 cluster, set clk_big_min and clk_little_max */ + clk_big_min = get_table_min(freq_table[0]); + clk_little_max = VIRT_FREQ(1, get_table_max(freq_table[1])); + + pr_debug("%s: cluster: %d, clk_big_min: %d, clk_little_max: %d\n", + __func__, cluster, clk_big_min, clk_little_max); + + return 0; + +put_clusters: + for_each_present_cpu(i) { + struct device *cdev = get_cpu_device(i); + if (!cdev) { + pr_err("%s: failed to get cpu%d device\n", __func__, i); + return -ENODEV; + } + + _put_cluster_clk_and_freq_table(cdev); + } + + atomic_dec(&cluster_usage[cluster]); + + return ret; +} + /* Per-CPU initialization */ static int bL_cpufreq_init(struct cpufreq_policy *policy) { @@ -165,7 +437,7 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy) if (ret) return ret; - ret = cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]); + ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]); if (ret) { dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n", policy->cpu, cur_cluster); @@ -173,7 +445,17 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy) return ret; } - cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu); + if (cur_cluster < MAX_CLUSTERS) { + int cpu; + + cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); + + for_each_cpu(cpu, policy->cpus) + per_cpu(physical_cluster, cpu) = cur_cluster; + } else { + /* Assumption: during init, we are always running on A15 */ + per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER; + } if (arm_bL_ops->get_transition_latency) policy->cpuinfo.transition_latency = @@ -181,9 +463,8 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy) else policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; - policy->cur = bL_cpufreq_get(policy->cpu); - - cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); + if (is_bL_switching_enabled()) + per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu); dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu); return 0; @@ -206,27 +487,54 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy) return 0; } -/* Export freq_table to sysfs */ -static struct freq_attr *bL_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver bL_cpufreq_driver = { .name = "arm-big-little", - .flags = CPUFREQ_STICKY, - .verify = bL_cpufreq_verify_policy, - .target = bL_cpufreq_set_target, - .get = bL_cpufreq_get, + .flags = CPUFREQ_STICKY | + CPUFREQ_HAVE_GOVERNOR_PER_POLICY | + CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = bL_cpufreq_set_target, + .get = bL_cpufreq_get_rate, .init = bL_cpufreq_init, .exit = bL_cpufreq_exit, - .have_governor_per_policy = true, - .attr = bL_cpufreq_attr, + .attr = cpufreq_generic_attr, +}; + +static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb, + unsigned long action, void *_arg) +{ + pr_debug("%s: action: %ld\n", __func__, action); + + switch (action) { + case BL_NOTIFY_PRE_ENABLE: + case BL_NOTIFY_PRE_DISABLE: + cpufreq_unregister_driver(&bL_cpufreq_driver); + break; + + case BL_NOTIFY_POST_ENABLE: + set_switching_enabled(true); + cpufreq_register_driver(&bL_cpufreq_driver); + break; + + case 
BL_NOTIFY_POST_DISABLE: + set_switching_enabled(false); + cpufreq_register_driver(&bL_cpufreq_driver); + break; + + default: + return NOTIFY_DONE; + } + + return NOTIFY_OK; +} + +static struct notifier_block bL_switcher_notifier = { + .notifier_call = bL_cpufreq_switcher_notifier, }; int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops) { - int ret; + int ret, i; if (arm_bL_ops) { pr_debug("%s: Already registered: %s, exiting\n", __func__, @@ -241,16 +549,29 @@ int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops) arm_bL_ops = ops; + ret = bL_switcher_get_enabled(); + set_switching_enabled(ret); + + for (i = 0; i < MAX_CLUSTERS; i++) + mutex_init(&cluster_lock[i]); + ret = cpufreq_register_driver(&bL_cpufreq_driver); if (ret) { pr_info("%s: Failed registering platform driver: %s, err: %d\n", __func__, ops->name, ret); arm_bL_ops = NULL; } else { - pr_info("%s: Registered platform driver: %s\n", __func__, - ops->name); + ret = bL_switcher_register_notifier(&bL_switcher_notifier); + if (ret) { + cpufreq_unregister_driver(&bL_cpufreq_driver); + arm_bL_ops = NULL; + } else { + pr_info("%s: Registered platform driver: %s\n", + __func__, ops->name); + } } + bL_switcher_put_enabled(); return ret; } EXPORT_SYMBOL_GPL(bL_cpufreq_register); @@ -263,7 +584,10 @@ void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops) return; } + bL_switcher_get_enabled(); + bL_switcher_unregister_notifier(&bL_switcher_notifier); cpufreq_unregister_driver(&bL_cpufreq_driver); + bL_switcher_put_enabled(); pr_info("%s: Un-registered platform driver: %s\n", __func__, arm_bL_ops->name); arm_bL_ops = NULL; diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h index 79b2ce17884..70f18fc12d4 100644 --- a/drivers/cpufreq/arm_big_little.h +++ b/drivers/cpufreq/arm_big_little.h @@ -34,11 +34,6 @@ struct cpufreq_arm_bL_ops { int (*init_opp_table)(struct device *cpu_dev); }; -static inline int cpu_to_cluster(int cpu) -{ - return topology_physical_package_id(cpu); -} - int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops); void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops); diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c index 480c0bd0468..8d9d5910890 100644 --- a/drivers/cpufreq/arm_big_little_dt.c +++ b/drivers/cpufreq/arm_big_little_dt.c @@ -24,7 +24,7 @@ #include <linux/export.h> #include <linux/module.h> #include <linux/of_device.h> -#include <linux/opp.h> +#include <linux/pm_opp.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/types.h> diff --git a/drivers/cpufreq/at32ap-cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c index e0c38d93899..7b612c8bb09 100644 --- a/drivers/cpufreq/at32ap-cpufreq.c +++ b/drivers/cpufreq/at32ap-cpufreq.c @@ -19,99 +19,104 @@ #include <linux/clk.h> #include <linux/err.h> #include <linux/export.h> +#include <linux/slab.h> -static struct clk *cpuclk; - -static int at32_verify_speed(struct cpufreq_policy *policy) -{ - if (policy->cpu != 0) - return -EINVAL; - - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); - return 0; -} - -static unsigned int at32_get_speed(unsigned int cpu) -{ - /* No SMP support */ - if (cpu) - return 0; - return (unsigned int)((clk_get_rate(cpuclk) + 500) / 1000); -} +static struct cpufreq_frequency_table *freq_table; static unsigned int ref_freq; static unsigned long loops_per_jiffy_ref; -static int at32_set_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) +static int 
at32_set_target(struct cpufreq_policy *policy, unsigned int index) { - struct cpufreq_freqs freqs; - long freq; - - /* Convert target_freq from kHz to Hz */ - freq = clk_round_rate(cpuclk, target_freq * 1000); - - /* Check if policy->min <= new_freq <= policy->max */ - if(freq < (policy->min * 1000) || freq > (policy->max * 1000)) - return -EINVAL; + unsigned int old_freq, new_freq; - pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000); - - freqs.old = at32_get_speed(0); - freqs.new = (freq + 500) / 1000; - freqs.flags = 0; + old_freq = policy->cur; + new_freq = freq_table[index].frequency; if (!ref_freq) { - ref_freq = freqs.old; + ref_freq = old_freq; loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy; } - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - if (freqs.old < freqs.new) + if (old_freq < new_freq) boot_cpu_data.loops_per_jiffy = cpufreq_scale( - loops_per_jiffy_ref, ref_freq, freqs.new); - clk_set_rate(cpuclk, freq); - if (freqs.new < freqs.old) + loops_per_jiffy_ref, ref_freq, new_freq); + clk_set_rate(policy->clk, new_freq * 1000); + if (new_freq < old_freq) boot_cpu_data.loops_per_jiffy = cpufreq_scale( - loops_per_jiffy_ref, ref_freq, freqs.new); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - pr_debug("cpufreq: set frequency %lu Hz\n", freq); + loops_per_jiffy_ref, ref_freq, new_freq); return 0; } -static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy) +static int at32_cpufreq_driver_init(struct cpufreq_policy *policy) { + unsigned int frequency, rate, min_freq; + struct clk *cpuclk; + int retval, steps, i; + if (policy->cpu != 0) return -EINVAL; cpuclk = clk_get(NULL, "cpu"); if (IS_ERR(cpuclk)) { pr_debug("cpufreq: could not get CPU clk\n"); - return PTR_ERR(cpuclk); + retval = PTR_ERR(cpuclk); + goto out_err; } - policy->cpuinfo.min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000; - policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; + min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000; + frequency = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; policy->cpuinfo.transition_latency = 0; - policy->cur = at32_get_speed(0); - policy->min = policy->cpuinfo.min_freq; - policy->max = policy->cpuinfo.max_freq; - printk("cpufreq: AT32AP CPU frequency driver\n"); + /* + * AVR32 CPU frequency rate scales in power of two between maximum and + * minimum, also add space for the table end marker. + * + * Further validate that the frequency is usable, and append it to the + * frequency table. 
+ */ + steps = fls(frequency / min_freq) + 1; + freq_table = kzalloc(steps * sizeof(struct cpufreq_frequency_table), + GFP_KERNEL); + if (!freq_table) { + retval = -ENOMEM; + goto out_err_put_clk; + } - return 0; + for (i = 0; i < (steps - 1); i++) { + rate = clk_round_rate(cpuclk, frequency * 1000) / 1000; + + if (rate != frequency) + freq_table[i].frequency = CPUFREQ_ENTRY_INVALID; + else + freq_table[i].frequency = frequency; + + frequency /= 2; + } + + policy->clk = cpuclk; + freq_table[steps - 1].frequency = CPUFREQ_TABLE_END; + + retval = cpufreq_table_validate_and_show(policy, freq_table); + if (!retval) { + printk("cpufreq: AT32AP CPU frequency driver\n"); + return 0; + } + + kfree(freq_table); +out_err_put_clk: + clk_put(cpuclk); +out_err: + return retval; } static struct cpufreq_driver at32_driver = { .name = "at32ap", .init = at32_cpufreq_driver_init, - .verify = at32_verify_speed, - .target = at32_set_target, - .get = at32_get_speed, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = at32_set_target, + .get = cpufreq_generic_get, .flags = CPUFREQ_STICKY, }; diff --git a/drivers/cpufreq/blackfin-cpufreq.c b/drivers/cpufreq/blackfin-cpufreq.c index ef05978a723..a9f8e5bd071 100644 --- a/drivers/cpufreq/blackfin-cpufreq.c +++ b/drivers/cpufreq/blackfin-cpufreq.c @@ -127,41 +127,28 @@ unsigned long cpu_set_cclk(int cpu, unsigned long new) } #endif -static int bfin_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) +static int bfin_target(struct cpufreq_policy *policy, unsigned int index) { #ifndef CONFIG_BF60x unsigned int plldiv; #endif - unsigned int index; - unsigned long cclk_hz; - struct cpufreq_freqs freqs; static unsigned long lpj_ref; static unsigned int lpj_ref_freq; + unsigned int old_freq, new_freq; int ret = 0; #if defined(CONFIG_CYCLES_CLOCKSOURCE) cycles_t cycles; #endif - if (cpufreq_frequency_table_target(policy, bfin_freq_table, target_freq, - relation, &index)) - return -EINVAL; + old_freq = bfin_getfreq_khz(0); + new_freq = bfin_freq_table[index].frequency; - cclk_hz = bfin_freq_table[index].frequency; - - freqs.old = bfin_getfreq_khz(0); - freqs.new = cclk_hz; - - pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n", - cclk_hz, target_freq, freqs.old); - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); #ifndef CONFIG_BF60x plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel; bfin_write_PLL_DIV(plldiv); #else - ret = cpu_set_cclk(policy->cpu, freqs.new * 1000); + ret = cpu_set_cclk(policy->cpu, new_freq * 1000); if (ret != 0) { WARN_ONCE(ret, "cpufreq set freq failed %d\n", ret); return ret; @@ -177,25 +164,16 @@ static int bfin_target(struct cpufreq_policy *policy, #endif if (!lpj_ref_freq) { lpj_ref = loops_per_jiffy; - lpj_ref_freq = freqs.old; + lpj_ref_freq = old_freq; } - if (freqs.new != freqs.old) { + if (new_freq != old_freq) { loops_per_jiffy = cpufreq_scale(lpj_ref, - lpj_ref_freq, freqs.new); + lpj_ref_freq, new_freq); } - /* TODO: just test case for cycles clock source, remove later */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - pr_debug("cpufreq: done\n"); return ret; } -static int bfin_verify_speed(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, bfin_freq_table); -} - static int __bfin_cpu_init(struct cpufreq_policy *policy) { @@ -209,23 +187,16 @@ static int __bfin_cpu_init(struct cpufreq_policy *policy) policy->cpuinfo.transition_latency = 50000; /* 50us assumed */ - policy->cur = cclk; 
- cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu); - return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table); + return cpufreq_table_validate_and_show(policy, bfin_freq_table); } -static struct freq_attr *bfin_freq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver bfin_driver = { - .verify = bfin_verify_speed, - .target = bfin_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = bfin_target, .get = bfin_getfreq_khz, .init = __bfin_cpu_init, .name = "bfin cpufreq", - .attr = bfin_freq_attr, + .attr = cpufreq_generic_attr, }; static int __init bfin_cpu_init(void) diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index cbfffa91ebd..86beda9f950 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c @@ -12,14 +12,18 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/clk.h> +#include <linux/cpu.h> +#include <linux/cpu_cooling.h> #include <linux/cpufreq.h> +#include <linux/cpumask.h> #include <linux/err.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/opp.h> +#include <linux/pm_opp.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> +#include <linux/thermal.h> static unsigned int transition_latency; static unsigned int voltage_tolerance; /* in percentage */ @@ -28,74 +32,48 @@ static struct device *cpu_dev; static struct clk *cpu_clk; static struct regulator *cpu_reg; static struct cpufreq_frequency_table *freq_table; +static struct thermal_cooling_device *cdev; -static int cpu0_verify_speed(struct cpufreq_policy *policy) +static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index) { - return cpufreq_frequency_table_verify(policy, freq_table); -} - -static unsigned int cpu0_get_speed(unsigned int cpu) -{ - return clk_get_rate(cpu_clk) / 1000; -} - -static int cpu0_set_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) -{ - struct cpufreq_freqs freqs; - struct opp *opp; + struct dev_pm_opp *opp; unsigned long volt = 0, volt_old = 0, tol = 0; + unsigned int old_freq, new_freq; long freq_Hz, freq_exact; - unsigned int index; int ret; - ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, - relation, &index); - if (ret) { - pr_err("failed to match target freqency %d: %d\n", - target_freq, ret); - return ret; - } - freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000); - if (freq_Hz < 0) + if (freq_Hz <= 0) freq_Hz = freq_table[index].frequency * 1000; - freq_exact = freq_Hz; - freqs.new = freq_Hz / 1000; - freqs.old = clk_get_rate(cpu_clk) / 1000; - if (freqs.old == freqs.new) - return 0; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + freq_exact = freq_Hz; + new_freq = freq_Hz / 1000; + old_freq = clk_get_rate(cpu_clk) / 1000; if (!IS_ERR(cpu_reg)) { rcu_read_lock(); - opp = opp_find_freq_ceil(cpu_dev, &freq_Hz); + opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz); if (IS_ERR(opp)) { rcu_read_unlock(); pr_err("failed to find OPP for %ld\n", freq_Hz); - freqs.new = freqs.old; - ret = PTR_ERR(opp); - goto post_notify; + return PTR_ERR(opp); } - volt = opp_get_voltage(opp); + volt = dev_pm_opp_get_voltage(opp); rcu_read_unlock(); tol = volt * voltage_tolerance / 100; volt_old = regulator_get_voltage(cpu_reg); } pr_debug("%u MHz, %ld mV --> %u MHz, %ld mV\n", - freqs.old / 1000, volt_old ? volt_old / 1000 : -1, - freqs.new / 1000, volt ? 
volt / 1000 : -1); + old_freq / 1000, volt_old ? volt_old / 1000 : -1, + new_freq / 1000, volt ? volt / 1000 : -1); /* scaling up? scale voltage before frequency */ - if (!IS_ERR(cpu_reg) && freqs.new > freqs.old) { + if (!IS_ERR(cpu_reg) && new_freq > old_freq) { ret = regulator_set_voltage_tol(cpu_reg, volt, tol); if (ret) { pr_err("failed to scale voltage up: %d\n", ret); - freqs.new = freqs.old; - goto post_notify; + return ret; } } @@ -104,72 +82,35 @@ static int cpu0_set_target(struct cpufreq_policy *policy, pr_err("failed to set clock rate: %d\n", ret); if (!IS_ERR(cpu_reg)) regulator_set_voltage_tol(cpu_reg, volt_old, tol); - freqs.new = freqs.old; - goto post_notify; + return ret; } /* scaling down? scale voltage after frequency */ - if (!IS_ERR(cpu_reg) && freqs.new < freqs.old) { + if (!IS_ERR(cpu_reg) && new_freq < old_freq) { ret = regulator_set_voltage_tol(cpu_reg, volt, tol); if (ret) { pr_err("failed to scale voltage down: %d\n", ret); - clk_set_rate(cpu_clk, freqs.old * 1000); - freqs.new = freqs.old; + clk_set_rate(cpu_clk, old_freq * 1000); } } -post_notify: - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - return ret; } static int cpu0_cpufreq_init(struct cpufreq_policy *policy) { - int ret; - - ret = cpufreq_frequency_table_cpuinfo(policy, freq_table); - if (ret) { - pr_err("invalid frequency table: %d\n", ret); - return ret; - } - - policy->cpuinfo.transition_latency = transition_latency; - policy->cur = clk_get_rate(cpu_clk) / 1000; - - /* - * The driver only supports the SMP configuartion where all processors - * share the clock and voltage and clock. Use cpufreq affected_cpus - * interface to have all CPUs scaled together. - */ - cpumask_setall(policy->cpus); - - cpufreq_frequency_table_get_attr(freq_table, policy->cpu); - - return 0; -} - -static int cpu0_cpufreq_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - - return 0; + policy->clk = cpu_clk; + return cpufreq_generic_init(policy, freq_table, transition_latency); } -static struct freq_attr *cpu0_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver cpu0_cpufreq_driver = { - .flags = CPUFREQ_STICKY, - .verify = cpu0_verify_speed, - .target = cpu0_set_target, - .get = cpu0_get_speed, + .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = cpu0_set_target, + .get = cpufreq_generic_get, .init = cpu0_cpufreq_init, - .exit = cpu0_cpufreq_exit, .name = "generic_cpu0", - .attr = cpu0_cpufreq_attr, + .attr = cpufreq_generic_attr, }; static int cpu0_cpufreq_probe(struct platform_device *pdev) @@ -177,7 +118,11 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) struct device_node *np; int ret; - cpu_dev = &pdev->dev; + cpu_dev = get_cpu_device(0); + if (!cpu_dev) { + pr_err("failed to get cpu0 device\n"); + return -ENODEV; + } np = of_node_get(cpu_dev->of_node); if (!np) { @@ -185,7 +130,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) return -ENOENT; } - cpu_reg = devm_regulator_get_optional(cpu_dev, "cpu0"); + cpu_reg = regulator_get_optional(cpu_dev, "cpu0"); if (IS_ERR(cpu_reg)) { /* * If cpu0 regulator supply node is present, but regulator is @@ -200,23 +145,20 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) PTR_ERR(cpu_reg)); } - cpu_clk = devm_clk_get(cpu_dev, NULL); + cpu_clk = clk_get(cpu_dev, NULL); if (IS_ERR(cpu_clk)) { ret = PTR_ERR(cpu_clk); pr_err("failed to get cpu0 clock: %d\n", 
ret); - goto out_put_node; + goto out_put_reg; } - ret = of_init_opp_table(cpu_dev); - if (ret) { - pr_err("failed to init OPP table: %d\n", ret); - goto out_put_node; - } + /* OPPs might be populated at runtime, so don't check for error here */ + of_init_opp_table(cpu_dev); - ret = opp_init_cpufreq_table(cpu_dev, &freq_table); + ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); if (ret) { pr_err("failed to init cpufreq table: %d\n", ret); - goto out_put_node; + goto out_put_clk; } of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance); @@ -224,8 +166,8 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) if (of_property_read_u32(np, "clock-latency", &transition_latency)) transition_latency = CPUFREQ_ETERNAL; - if (cpu_reg) { - struct opp *opp; + if (!IS_ERR(cpu_reg)) { + struct dev_pm_opp *opp; unsigned long min_uV, max_uV; int i; @@ -237,12 +179,12 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) ; rcu_read_lock(); - opp = opp_find_freq_exact(cpu_dev, + opp = dev_pm_opp_find_freq_exact(cpu_dev, freq_table[0].frequency * 1000, true); - min_uV = opp_get_voltage(opp); - opp = opp_find_freq_exact(cpu_dev, + min_uV = dev_pm_opp_get_voltage(opp); + opp = dev_pm_opp_find_freq_exact(cpu_dev, freq_table[i-1].frequency * 1000, true); - max_uV = opp_get_voltage(opp); + max_uV = dev_pm_opp_get_voltage(opp); rcu_read_unlock(); ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV); if (ret > 0) @@ -255,11 +197,28 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) goto out_free_table; } + /* + * For now, just register the cooling device; the + * thermal DT code takes care of matching it. + */ + if (of_find_property(np, "#cooling-cells", NULL)) { + cdev = of_cpufreq_cooling_register(np, cpu_present_mask); + if (IS_ERR(cdev)) + pr_err("running cpufreq without cooling device: %ld\n", + PTR_ERR(cdev)); + } + of_node_put(np); return 0; out_free_table: - opp_free_cpufreq_table(cpu_dev, &freq_table); + dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); +out_put_clk: + if (!IS_ERR(cpu_clk)) + clk_put(cpu_clk); +out_put_reg: + if (!IS_ERR(cpu_reg)) + regulator_put(cpu_reg); out_put_node: of_node_put(np); return ret; @@ -267,8 +226,9 @@ out_put_node: static int cpu0_cpufreq_remove(struct platform_device *pdev) { + cpufreq_cooling_unregister(cdev); cpufreq_unregister_driver(&cpu0_cpufreq_driver); - opp_free_cpufreq_table(cpu_dev, &freq_table); + dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); return 0; } diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c index b83d45f6857..a2258090b58 100644 --- a/drivers/cpufreq/cpufreq-nforce2.c +++ b/drivers/cpufreq/cpufreq-nforce2.c @@ -270,7 +270,7 @@ static int nforce2_target(struct cpufreq_policy *policy, pr_debug("Old CPU frequency %d kHz, new %d kHz\n", freqs.old, freqs.new); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + cpufreq_freq_transition_begin(policy, &freqs); /* Disable IRQs */ /* local_irq_save(flags); */ @@ -285,7 +285,7 @@ static int nforce2_target(struct cpufreq_policy *policy, /* Enable IRQs */ /* local_irq_restore(flags); */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + cpufreq_freq_transition_end(policy, &freqs, 0); return 0; } @@ -303,9 +303,7 @@ static int nforce2_verify(struct cpufreq_policy *policy) if (policy->min < (fsb_pol_max * fid * 100)) policy->max = (fsb_pol_max + 1) * fid * 100; - cpufreq_verify_within_limits(policy, - policy->cpuinfo.min_freq, -
policy->cpuinfo.max_freq); + cpufreq_verify_within_cpu_limits(policy); return 0; } @@ -362,7 +360,6 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy) policy->min = policy->cpuinfo.min_freq = min_fsb * fid * 100; policy->max = policy->cpuinfo.max_freq = max_fsb * fid * 100; policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; - policy->cur = nforce2_get(policy->cpu); return 0; } @@ -382,7 +379,7 @@ static struct cpufreq_driver nforce2_driver = { }; #ifdef MODULE -static DEFINE_PCI_DEVICE_TABLE(nforce2_ids) = { +static const struct pci_device_id nforce2_ids[] = { { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2 }, {} }; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 43c24aa756f..6f024852c6f 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -26,7 +26,7 @@ #include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> -#include <linux/syscore_ops.h> +#include <linux/suspend.h> #include <linux/tick.h> #include <trace/events/power.h> @@ -39,57 +39,20 @@ static struct cpufreq_driver *cpufreq_driver; static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data); static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback); static DEFINE_RWLOCK(cpufreq_driver_lock); -static DEFINE_MUTEX(cpufreq_governor_lock); +DEFINE_MUTEX(cpufreq_governor_lock); static LIST_HEAD(cpufreq_policy_list); -#ifdef CONFIG_HOTPLUG_CPU /* This one keeps track of the previously set governor of a removed CPU */ static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor); -#endif -/* - * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure - * all cpufreq/hotplug/workqueue/etc related lock issues. - * - * The rules for this semaphore: - * - Any routine that wants to read from the policy structure will - * do a down_read on this semaphore. - * - Any routine that will write to the policy structure and/or may take away - * the policy altogether (eg. CPU hotplug), will hold this lock in write - * mode before doing so. - * - * Additional rules: - * - Governor routines that can be called in cpufreq hotplug path should not - * take this sem as top level hotplug notifier handler takes this. 
- * - Lock should not be held across - * __cpufreq_governor(data, CPUFREQ_GOV_STOP); - */ -static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem); +/* Flag to suspend/resume CPUFreq governors */ +static bool cpufreq_suspended; -#define lock_policy_rwsem(mode, cpu) \ -static int lock_policy_rwsem_##mode(int cpu) \ -{ \ - struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \ - BUG_ON(!policy); \ - down_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \ - \ - return 0; \ -} - -lock_policy_rwsem(read, cpu); -lock_policy_rwsem(write, cpu); - -#define unlock_policy_rwsem(mode, cpu) \ -static void unlock_policy_rwsem_##mode(int cpu) \ -{ \ - struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \ - BUG_ON(!policy); \ - up_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \ +static inline bool has_target(void) +{ + return cpufreq_driver->target_index || cpufreq_driver->target; } -unlock_policy_rwsem(read, cpu); -unlock_policy_rwsem(write, cpu); - /* * rwsem to guarantee that cpufreq driver module doesn't unload during critical * sections @@ -135,7 +98,7 @@ static DEFINE_MUTEX(cpufreq_governor_mutex); bool have_governor_per_policy(void) { - return cpufreq_driver->have_governor_per_policy; + return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY); } EXPORT_SYMBOL_GPL(have_governor_per_policy); @@ -183,6 +146,57 @@ u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy) } EXPORT_SYMBOL_GPL(get_cpu_idle_time); +/* + * This is a generic cpufreq init() routine which can be used by cpufreq + * drivers of SMP systems. It will do the following: + * - validate & show the freq table passed in + * - set the policy's transition latency + * - fill policy->cpus with all possible CPUs + */ +int cpufreq_generic_init(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table, + unsigned int transition_latency) +{ + int ret; + + ret = cpufreq_table_validate_and_show(policy, table); + if (ret) { + pr_err("%s: invalid frequency table: %d\n", __func__, ret); + return ret; + } + + policy->cpuinfo.transition_latency = transition_latency; + + /* + * The driver only supports the SMP configuration where all processors + * share the clock and voltage. + */ + cpumask_setall(policy->cpus); + + return 0; +} +EXPORT_SYMBOL_GPL(cpufreq_generic_init); + +unsigned int cpufreq_generic_get(unsigned int cpu) +{ + struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); + + if (!policy || IS_ERR(policy->clk)) { + pr_err("%s: No %s associated with cpu: %d\n", + __func__, policy ?
"clk" : "policy", cpu); + return 0; + } + + return clk_get_rate(policy->clk) / 1000; +} +EXPORT_SYMBOL_GPL(cpufreq_generic_get); + +/* Only for cpufreq core internal use */ +struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu) +{ + return per_cpu(cpufreq_cpu_data, cpu); +} + struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) { struct cpufreq_policy *policy = NULL; @@ -247,15 +261,14 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) if (!l_p_j_ref_freq) { l_p_j_ref = loops_per_jiffy; l_p_j_ref_freq = ci->old; - pr_debug("saving %lu as reference value for loops_per_jiffy; " - "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq); + pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n", + l_p_j_ref, l_p_j_ref_freq); } - if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) || - (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) { + if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) { loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, ci->new); - pr_debug("scaling loops_per_jiffy to %lu " - "for frequency %u kHz\n", loops_per_jiffy, ci->new); + pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n", + loops_per_jiffy, ci->new); } } #else @@ -275,7 +288,7 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy, freqs->flags = cpufreq_driver->flags; pr_debug("notification %u of frequency transition to %u kHz\n", - state, freqs->new); + state, freqs->new); switch (state) { @@ -287,9 +300,8 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy, if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { if ((policy) && (policy->cpu == freqs->cpu) && (policy->cur) && (policy->cur != freqs->old)) { - pr_debug("Warning: CPU frequency is" - " %u, cpufreq assumed %u kHz.\n", - freqs->old, policy->cur); + pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n", + freqs->old, policy->cur); freqs->old = policy->cur; } } @@ -300,8 +312,8 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy, case CPUFREQ_POSTCHANGE: adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); - pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new, - (unsigned long)freqs->cpu); + pr_debug("FREQ: %lu - CPU: %lu\n", + (unsigned long)freqs->new, (unsigned long)freqs->cpu); trace_cpu_frequency(freqs->new, freqs->cpu); srcu_notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_POSTCHANGE, freqs); @@ -319,18 +331,106 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy, * function. It is called twice on all CPU frequency changes that have * external effects. 
*/ -void cpufreq_notify_transition(struct cpufreq_policy *policy, +static void cpufreq_notify_transition(struct cpufreq_policy *policy, struct cpufreq_freqs *freqs, unsigned int state) { for_each_cpu(freqs->cpu, policy->cpus) __cpufreq_notify_transition(policy, freqs, state); } -EXPORT_SYMBOL_GPL(cpufreq_notify_transition); + +/* Do post notifications when there is a chance that the transition has failed */ +static void cpufreq_notify_post_transition(struct cpufreq_policy *policy, + struct cpufreq_freqs *freqs, int transition_failed) +{ + cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE); + if (!transition_failed) + return; + + swap(freqs->old, freqs->new); + cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE); +} + +void cpufreq_freq_transition_begin(struct cpufreq_policy *policy, + struct cpufreq_freqs *freqs) +{ + + /* + * Catch double invocations of _begin() which lead to self-deadlock. + * ASYNC_NOTIFICATION drivers are left out because the cpufreq core + * doesn't invoke _begin() on their behalf, and hence the chances of + * double invocations are very low. Moreover, there are scenarios + * where these checks can emit false-positive warnings in these + * drivers, so we avoid that by skipping them altogether. + */ + WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION) + && current == policy->transition_task); + +wait: + wait_event(policy->transition_wait, !policy->transition_ongoing); + + spin_lock(&policy->transition_lock); + + if (unlikely(policy->transition_ongoing)) { + spin_unlock(&policy->transition_lock); + goto wait; + } + + policy->transition_ongoing = true; + policy->transition_task = current; + + spin_unlock(&policy->transition_lock); + + cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE); +} +EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin); + +void cpufreq_freq_transition_end(struct cpufreq_policy *policy, + struct cpufreq_freqs *freqs, int transition_failed) +{ + if (unlikely(WARN_ON(!policy->transition_ongoing))) + return; + + cpufreq_notify_post_transition(policy, freqs, transition_failed); + + policy->transition_ongoing = false; + policy->transition_task = NULL; + + wake_up(&policy->transition_wait); +} +EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end); /********************************************************************* * SYSFS INTERFACE * *********************************************************************/ +static ssize_t show_boost(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled); +} + +static ssize_t store_boost(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + int ret, enable; + + ret = sscanf(buf, "%d", &enable); + if (ret != 1 || enable < 0 || enable > 1) + return -EINVAL; + + if (cpufreq_boost_trigger_state(enable)) { + pr_err("%s: Cannot %s BOOST!\n", + __func__, enable ? "enable" : "disable"); + return -EINVAL; + } + + pr_debug("%s: cpufreq BOOST %s\n", + __func__, enable ?
"enabled" : "disabled"); + + return count; +} +define_one_global_rw(boost); static struct cpufreq_governor *__find_governor(const char *str_governor) { @@ -363,7 +463,7 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, *policy = CPUFREQ_POLICY_POWERSAVE; err = 0; } - } else if (cpufreq_driver->target) { + } else if (has_target()) { struct cpufreq_governor *t; mutex_lock(&cpufreq_governor_mutex); @@ -414,7 +514,7 @@ show_one(scaling_min_freq, min); show_one(scaling_max_freq, max); show_one(scaling_cur_freq, cur); -static int __cpufreq_set_policy(struct cpufreq_policy *policy, +static int cpufreq_set_policy(struct cpufreq_policy *policy, struct cpufreq_policy *new_policy); /** @@ -435,7 +535,7 @@ static ssize_t store_##file_name \ if (ret != 1) \ return -EINVAL; \ \ - ret = __cpufreq_set_policy(policy, &new_policy); \ + ret = cpufreq_set_policy(policy, &new_policy); \ policy->user_policy.object = policy->object; \ \ return ret ? ret : count; \ @@ -493,11 +593,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy, &new_policy.governor)) return -EINVAL; - /* - * Do not use cpufreq_set_policy here or the user_policy.max - * will be wrongly overridden - */ - ret = __cpufreq_set_policy(policy, &new_policy); + ret = cpufreq_set_policy(policy, &new_policy); policy->user_policy.policy = policy->policy; policy->user_policy.governor = policy->governor; @@ -525,7 +621,7 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy, ssize_t i = 0; struct cpufreq_governor *t; - if (!cpufreq_driver->target) { + if (!has_target()) { i += sprintf(buf, "performance powersave"); goto out; } @@ -653,24 +749,21 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) { struct cpufreq_policy *policy = to_policy(kobj); struct freq_attr *fattr = to_attr(attr); - ssize_t ret = -EINVAL; + ssize_t ret; if (!down_read_trylock(&cpufreq_rwsem)) - goto exit; + return -EINVAL; - if (lock_policy_rwsem_read(policy->cpu) < 0) - goto up_read; + down_read(&policy->rwsem); if (fattr->show) ret = fattr->show(policy, buf); else ret = -EIO; - unlock_policy_rwsem_read(policy->cpu); - -up_read: + up_read(&policy->rwsem); up_read(&cpufreq_rwsem); -exit: + return ret; } @@ -689,17 +782,15 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr, if (!down_read_trylock(&cpufreq_rwsem)) goto unlock; - if (lock_policy_rwsem_write(policy->cpu) < 0) - goto up_read; + down_write(&policy->rwsem); if (fattr->store) ret = fattr->store(policy, buf, count); else ret = -EIO; - unlock_policy_rwsem_write(policy->cpu); + up_write(&policy->rwsem); -up_read: up_read(&cpufreq_rwsem); unlock: put_online_cpus(); @@ -815,7 +906,7 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy, if (ret) goto err_out_kobj_put; } - if (cpufreq_driver->target) { + if (has_target()) { ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); if (ret) goto err_out_kobj_put; @@ -840,18 +931,28 @@ err_out_kobj_put: static void cpufreq_init_policy(struct cpufreq_policy *policy) { + struct cpufreq_governor *gov = NULL; struct cpufreq_policy new_policy; int ret = 0; memcpy(&new_policy, policy, sizeof(*policy)); - /* assure that the starting sequence is run in __cpufreq_set_policy */ - policy->governor = NULL; - /* set default policy */ - ret = __cpufreq_set_policy(policy, &new_policy); - policy->user_policy.policy = policy->policy; - policy->user_policy.governor = policy->governor; + /* Update governor of new_policy to the governor used before hotplug */ 
+ gov = __find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu)); + if (gov) + pr_debug("Restoring governor %s for cpu %d\n", + gov->name, policy->cpu); + else + gov = CPUFREQ_DEFAULT_GOVERNOR; + new_policy.governor = gov; + + /* Use the default policy if it's valid. */ + if (cpufreq_driver->setpolicy) + cpufreq_parse_governor(gov->name, &new_policy.policy, NULL); + + /* set default policy */ + ret = cpufreq_set_policy(policy, &new_policy); if (ret) { pr_debug("setting policy failed\n"); if (cpufreq_driver->exit) @@ -861,13 +962,12 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy) #ifdef CONFIG_HOTPLUG_CPU static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, - unsigned int cpu, struct device *dev, - bool frozen) + unsigned int cpu, struct device *dev) { - int ret = 0, has_target = !!cpufreq_driver->target; + int ret = 0; unsigned long flags; - if (has_target) { + if (has_target()) { ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); if (ret) { pr_err("%s: Failed to stop governor\n", __func__); @@ -875,7 +975,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, } } - lock_policy_rwsem_write(policy->cpu); + down_write(&policy->rwsem); write_lock_irqsave(&cpufreq_driver_lock, flags); @@ -883,21 +983,20 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, per_cpu(cpufreq_cpu_data, cpu) = policy; write_unlock_irqrestore(&cpufreq_driver_lock, flags); - unlock_policy_rwsem_write(policy->cpu); + up_write(&policy->rwsem); + + if (has_target()) { + ret = __cpufreq_governor(policy, CPUFREQ_GOV_START); + if (!ret) + ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); - if (has_target) { - if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) || - (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) { + if (ret) { pr_err("%s: Failed to start governor\n", __func__); return ret; } } - /* Don't touch sysfs links during light-weight init */ - if (!frozen) - ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); - - return ret; + return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); } #endif @@ -912,6 +1011,8 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu) read_unlock_irqrestore(&cpufreq_driver_lock, flags); + policy->governor = NULL; + return policy; } @@ -930,6 +1031,10 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void) goto err_free_cpumask; INIT_LIST_HEAD(&policy->policy_list); + init_rwsem(&policy->rwsem); + spin_lock_init(&policy->transition_lock); + init_waitqueue_head(&policy->transition_wait); + return policy; err_free_cpumask: @@ -940,6 +1045,30 @@ err_free_policy: return NULL; } +static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy) +{ + struct kobject *kobj; + struct completion *cmp; + + blocking_notifier_call_chain(&cpufreq_policy_notifier_list, + CPUFREQ_REMOVE_POLICY, policy); + + down_read(&policy->rwsem); + kobj = &policy->kobj; + cmp = &policy->kobj_unregister; + up_read(&policy->rwsem); + kobject_put(kobj); + + /* + * We need to make sure that the underlying kobj is + * actually not referenced anymore by anybody before we + * proceed with unloading.
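+ * (kobject_put() above dropped our own reference; the completion fires once the last remaining reference to the kobj is released.)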
+ */ + pr_debug("waiting for dropping of refcount\n"); + wait_for_completion(cmp); + pr_debug("wait complete\n"); +} + static void cpufreq_policy_free(struct cpufreq_policy *policy) { free_cpumask_var(policy->related_cpus); @@ -949,29 +1078,29 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy) static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu) { - if (cpu == policy->cpu) + if (WARN_ON(cpu == policy->cpu)) return; + down_write(&policy->rwsem); + policy->last_cpu = policy->cpu; policy->cpu = cpu; -#ifdef CONFIG_CPU_FREQ_TABLE - cpufreq_frequency_table_update_policy_cpu(policy); -#endif + up_write(&policy->rwsem); + blocking_notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_UPDATE_POLICY_CPU, policy); } -static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, - bool frozen) +static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) { unsigned int j, cpu = dev->id; int ret = -ENOMEM; struct cpufreq_policy *policy; unsigned long flags; + bool recover_policy = cpufreq_suspended; #ifdef CONFIG_HOTPLUG_CPU struct cpufreq_policy *tpolicy; - struct cpufreq_governor *gov; #endif if (cpu_is_offline(cpu)) @@ -998,7 +1127,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) { if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) { read_unlock_irqrestore(&cpufreq_driver_lock, flags); - ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen); + ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev); up_read(&cpufreq_rwsem); return ret; } @@ -1006,15 +1135,17 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, read_unlock_irqrestore(&cpufreq_driver_lock, flags); #endif - if (frozen) - /* Restore the saved policy when doing light-weight init */ - policy = cpufreq_policy_restore(cpu); - else + /* + * Restore the saved policy when doing light-weight init and fall back + * to the full init if that fails. + */ + policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL; + if (!policy) { + recover_policy = false; policy = cpufreq_policy_alloc(); - - if (!policy) - goto nomem_out; - + if (!policy) + goto nomem_out; + } /* * In the resume path, since we restore a saved policy, the assignment @@ -1022,12 +1153,13 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, * the creation of a brand new one. So we need to perform this update * by invoking update_policy_cpu(). 
*/ - if (frozen && cpu != policy->cpu) + if (recover_policy && cpu != policy->cpu) { update_policy_cpu(policy, cpu); - else + WARN_ON(kobject_move(&policy->kobj, &dev->kobj)); + } else { policy->cpu = cpu; + } - policy->governor = CPUFREQ_DEFAULT_GOVERNOR; cpumask_copy(policy->cpus, cpumask_of(cpu)); init_completion(&policy->kobj_unregister); @@ -1051,30 +1183,74 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, */ cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); - policy->user_policy.min = policy->min; - policy->user_policy.max = policy->max; - - blocking_notifier_call_chain(&cpufreq_policy_notifier_list, - CPUFREQ_START, policy); - -#ifdef CONFIG_HOTPLUG_CPU - gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu)); - if (gov) { - policy->governor = gov; - pr_debug("Restoring governor %s for cpu %d\n", - policy->governor->name, cpu); + if (!recover_policy) { + policy->user_policy.min = policy->min; + policy->user_policy.max = policy->max; } -#endif + down_write(&policy->rwsem); write_lock_irqsave(&cpufreq_driver_lock, flags); for_each_cpu(j, policy->cpus) per_cpu(cpufreq_cpu_data, j) = policy; write_unlock_irqrestore(&cpufreq_driver_lock, flags); - if (!frozen) { + if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { + policy->cur = cpufreq_driver->get(policy->cpu); + if (!policy->cur) { + pr_err("%s: ->get() failed\n", __func__); + goto err_get_freq; + } + } + + /* + * Sometimes boot loaders set the CPU frequency to a value outside of + * the frequency table known to the cpufreq core. In such cases the CPU + * might be unstable if it has to run at that frequency for a long + * time, so it's better to set it to a frequency which is specified in + * the freq-table. This also makes cpufreq stats inconsistent, as + * cpufreq-stats would fail to register because the current frequency + * of the CPU isn't found in the freq-table. + * + * Because we don't want this change to affect the boot process badly, + * we go for the next freq which is >= policy->cur ('cur' must be set + * by now, otherwise we will end up setting freq to the lowest entry of + * the table, as 'cur' is initialized to zero). + * + * We are passing target-freq as "policy->cur - 1" otherwise + * __cpufreq_driver_target() would simply fail, as policy->cur will be + * equal to target-freq. + */ + if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK) + && has_target()) { + /* Are we running at an unknown frequency? */ + ret = cpufreq_frequency_table_get_index(policy, policy->cur); + if (ret == -EINVAL) { + /* Warn the user and fix it */ + pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n", + __func__, policy->cpu, policy->cur); + ret = __cpufreq_driver_target(policy, policy->cur - 1, + CPUFREQ_RELATION_L); + + /* + * Having run for a few seconds since boot at an + * "unknown" frequency does not mean the system would + * remain stable at it for much longer. Hence, a BUG_ON().
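+ * (If even this fallback retarget fails, continuing to run at an unlisted frequency is treated as unsafe.)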
+ */ + BUG_ON(ret); + pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n", + __func__, policy->cpu, policy->cur); + } + } + + blocking_notifier_call_chain(&cpufreq_policy_notifier_list, + CPUFREQ_START, policy); + + if (!recover_policy) { ret = cpufreq_add_dev_interface(policy, dev); if (ret) goto err_out_unregister; + blocking_notifier_call_chain(&cpufreq_policy_notifier_list, + CPUFREQ_CREATE_POLICY, policy); } write_lock_irqsave(&cpufreq_driver_lock, flags); @@ -1083,6 +1259,12 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, cpufreq_init_policy(policy); + if (!recover_policy) { + policy->user_policy.policy = policy->policy; + policy->user_policy.governor = policy->governor; + } + up_write(&policy->rwsem); + kobject_uevent(&policy->kobj, KOBJ_ADD); up_read(&cpufreq_rwsem); @@ -1091,13 +1273,22 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, return 0; err_out_unregister: +err_get_freq: write_lock_irqsave(&cpufreq_driver_lock, flags); for_each_cpu(j, policy->cpus) per_cpu(cpufreq_cpu_data, j) = NULL; write_unlock_irqrestore(&cpufreq_driver_lock, flags); + if (cpufreq_driver->exit) + cpufreq_driver->exit(policy); err_set_policy_cpu: + if (recover_policy) { + /* Do not leave stale fallback data behind. */ + per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL; + cpufreq_policy_put_kobj(policy); + } cpufreq_policy_free(policy); + nomem_out: up_read(&cpufreq_rwsem); @@ -1115,30 +1306,26 @@ nomem_out: */ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) { - return __cpufreq_add_dev(dev, sif, false); + return __cpufreq_add_dev(dev, sif); } static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy, - unsigned int old_cpu, bool frozen) + unsigned int old_cpu) { struct device *cpu_dev; int ret; /* first sibling now owns the new sysfs dir */ - cpu_dev = get_cpu_device(cpumask_first(policy->cpus)); - - /* Don't touch sysfs files during light-weight tear-down */ - if (frozen) - return cpu_dev->id; + cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu)); sysfs_remove_link(&cpu_dev->kobj, "cpufreq"); ret = kobject_move(&policy->kobj, &cpu_dev->kobj); if (ret) { - pr_err("%s: Failed to move kobj: %d", __func__, ret); + pr_err("%s: Failed to move kobj: %d\n", __func__, ret); - WARN_ON(lock_policy_rwsem_write(old_cpu)); + down_write(&policy->rwsem); cpumask_set_cpu(old_cpu, policy->cpus); - unlock_policy_rwsem_write(old_cpu); + up_write(&policy->rwsem); ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq"); @@ -1150,8 +1337,7 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy, } static int __cpufreq_remove_dev_prepare(struct device *dev, - struct subsys_interface *sif, - bool frozen) + struct subsys_interface *sif) { unsigned int cpu = dev->id, cpus; int new_cpu, ret; @@ -1165,7 +1351,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev, policy = per_cpu(cpufreq_cpu_data, cpu); /* Save the policy somewhere when doing a light-weight tear-down */ - if (frozen) + if (cpufreq_suspended) per_cpu(cpufreq_cpu_data_fallback, cpu) = policy; write_unlock_irqrestore(&cpufreq_driver_lock, flags); @@ -1175,7 +1361,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev, return -EINVAL; } - if (cpufreq_driver->target) { + if (has_target()) { ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); if (ret) { pr_err("%s: Failed to stop governor\n", __func__); @@ -1183,50 +1369,39 @@ static int __cpufreq_remove_dev_prepare(struct device *dev, 
} } -#ifdef CONFIG_HOTPLUG_CPU if (!cpufreq_driver->setpolicy) strncpy(per_cpu(cpufreq_cpu_governor, cpu), policy->governor->name, CPUFREQ_NAME_LEN); -#endif - WARN_ON(lock_policy_rwsem_write(cpu)); + down_read(&policy->rwsem); cpus = cpumask_weight(policy->cpus); - - if (cpus > 1) - cpumask_clear_cpu(cpu, policy->cpus); - unlock_policy_rwsem_write(cpu); + up_read(&policy->rwsem); if (cpu != policy->cpu) { - if (!frozen) - sysfs_remove_link(&dev->kobj, "cpufreq"); + sysfs_remove_link(&dev->kobj, "cpufreq"); } else if (cpus > 1) { - - new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen); + new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu); if (new_cpu >= 0) { - WARN_ON(lock_policy_rwsem_write(cpu)); update_policy_cpu(policy, new_cpu); - unlock_policy_rwsem_write(cpu); - if (!frozen) { - pr_debug("%s: policy Kobject moved to cpu: %d " - "from: %d\n",__func__, new_cpu, cpu); - } + if (!cpufreq_suspended) + pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n", + __func__, new_cpu, cpu); } + } else if (cpufreq_driver->stop_cpu && cpufreq_driver->setpolicy) { + cpufreq_driver->stop_cpu(policy); } return 0; } static int __cpufreq_remove_dev_finish(struct device *dev, - struct subsys_interface *sif, - bool frozen) + struct subsys_interface *sif) { unsigned int cpu = dev->id, cpus; int ret; unsigned long flags; struct cpufreq_policy *policy; - struct kobject *kobj; - struct completion *cmp; read_lock_irqsave(&cpufreq_driver_lock, flags); policy = per_cpu(cpufreq_cpu_data, cpu); @@ -1237,38 +1412,27 @@ static int __cpufreq_remove_dev_finish(struct device *dev, return -EINVAL; } - lock_policy_rwsem_read(cpu); + down_write(&policy->rwsem); cpus = cpumask_weight(policy->cpus); - unlock_policy_rwsem_read(cpu); + + if (cpus > 1) + cpumask_clear_cpu(cpu, policy->cpus); + up_write(&policy->rwsem); /* If cpu is last user of policy, free policy */ if (cpus == 1) { - if (cpufreq_driver->target) { + if (has_target()) { ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); if (ret) { pr_err("%s: Failed to exit governor\n", - __func__); + __func__); return ret; } } - if (!frozen) { - lock_policy_rwsem_read(cpu); - kobj = &policy->kobj; - cmp = &policy->kobj_unregister; - unlock_policy_rwsem_read(cpu); - kobject_put(kobj); - - /* - * We need to make sure that the underlying kobj is - * actually not referenced anymore by anybody before we - * proceed with unloading. 
- */ - pr_debug("waiting for dropping of refcount\n"); - wait_for_completion(cmp); - pr_debug("wait complete\n"); - } + if (!cpufreq_suspended) + cpufreq_policy_put_kobj(policy); /* * Perform the ->exit() even during light-weight tear-down, @@ -1283,16 +1447,16 @@ static int __cpufreq_remove_dev_finish(struct device *dev, list_del(&policy->policy_list); write_unlock_irqrestore(&cpufreq_driver_lock, flags); - if (!frozen) + if (!cpufreq_suspended) cpufreq_policy_free(policy); - } else { - if (cpufreq_driver->target) { - if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) || - (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) { - pr_err("%s: Failed to start governor\n", - __func__); - return ret; - } + } else if (has_target()) { + ret = __cpufreq_governor(policy, CPUFREQ_GOV_START); + if (!ret) + ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); + + if (ret) { + pr_err("%s: Failed to start governor\n", __func__); + return ret; } } @@ -1301,36 +1465,24 @@ static int __cpufreq_remove_dev_finish(struct device *dev, } /** - * __cpufreq_remove_dev - remove a CPU device + * cpufreq_remove_dev - remove a CPU device * * Removes the cpufreq interface for a CPU device. - * Caller should already have policy_rwsem in write mode for this CPU. - * This routine frees the rwsem before returning. */ -static inline int __cpufreq_remove_dev(struct device *dev, - struct subsys_interface *sif, - bool frozen) -{ - int ret; - - ret = __cpufreq_remove_dev_prepare(dev, sif, frozen); - - if (!ret) - ret = __cpufreq_remove_dev_finish(dev, sif, frozen); - - return ret; -} - static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) { unsigned int cpu = dev->id; - int retval; + int ret; if (cpu_is_offline(cpu)) return 0; - retval = __cpufreq_remove_dev(dev, sif, false); - return retval; + ret = __cpufreq_remove_dev_prepare(dev, sif); + + if (!ret) + ret = __cpufreq_remove_dev_finish(dev, sif); + + return ret; } static void handle_update(struct work_struct *work) @@ -1359,8 +1511,8 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, struct cpufreq_freqs freqs; unsigned long flags; - pr_debug("Warning: CPU frequency out of sync: cpufreq and timing " - "core thinks of %u, is %u kHz.\n", old_freq, new_freq); + pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n", + old_freq, new_freq); freqs.old = old_freq; freqs.new = new_freq; @@ -1369,8 +1521,8 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, policy = per_cpu(cpufreq_cpu_data, cpu); read_unlock_irqrestore(&cpufreq_driver_lock, flags); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + cpufreq_freq_transition_begin(policy, &freqs); + cpufreq_freq_transition_end(policy, &freqs, 0); } /** @@ -1449,20 +1601,16 @@ static unsigned int __cpufreq_get(unsigned int cpu) */ unsigned int cpufreq_get(unsigned int cpu) { + struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); unsigned int ret_freq = 0; - if (!down_read_trylock(&cpufreq_rwsem)) - return 0; - - if (unlikely(lock_policy_rwsem_read(cpu))) - goto out_policy; - - ret_freq = __cpufreq_get(cpu); - - unlock_policy_rwsem_read(cpu); + if (policy) { + down_read(&policy->rwsem); + ret_freq = __cpufreq_get(cpu); + up_read(&policy->rwsem); -out_policy: - up_read(&cpufreq_rwsem); + cpufreq_cpu_put(policy); + } return ret_freq; } @@ -1475,83 +1623,103 @@ static struct subsys_interface cpufreq_interface = { .remove_dev = 
cpufreq_remove_dev, }; +/* + * In case the platform wants a specific frequency to be configured + * during suspend. + */ +int cpufreq_generic_suspend(struct cpufreq_policy *policy) +{ + int ret; + + if (!policy->suspend_freq) { + pr_err("%s: suspend_freq can't be zero\n", __func__); + return -EINVAL; + } + + pr_debug("%s: Setting suspend-freq: %u\n", __func__, + policy->suspend_freq); + + ret = __cpufreq_driver_target(policy, policy->suspend_freq, + CPUFREQ_RELATION_H); + if (ret) + pr_err("%s: unable to set suspend-freq: %u. err: %d\n", + __func__, policy->suspend_freq, ret); + + return ret; +} +EXPORT_SYMBOL(cpufreq_generic_suspend); + /** - * cpufreq_bp_suspend - Prepare the boot CPU for system suspend. + * cpufreq_suspend() - Suspend CPUFreq governors * - * This function is only executed for the boot processor. The other CPUs - * have been put offline by means of CPU hotplug. + * Called during system-wide suspend/hibernate cycles for suspending governors, + * as some platforms can't change frequency after this point in the suspend + * cycle: the devices (e.g. i2c, regulators) used for changing frequency are + * themselves suspended quickly after this point. */ -static int cpufreq_bp_suspend(void) +void cpufreq_suspend(void) { - int ret = 0; - - int cpu = smp_processor_id(); struct cpufreq_policy *policy; - pr_debug("suspending cpu %u\n", cpu); + if (!cpufreq_driver) + return; - /* If there's no policy for the boot CPU, we have nothing to do. */ - policy = cpufreq_cpu_get(cpu); - if (!policy) - return 0; + if (!has_target()) + return; - if (cpufreq_driver->suspend) { - ret = cpufreq_driver->suspend(policy); - if (ret) - printk(KERN_ERR "cpufreq: suspend failed in ->suspend " - "step on CPU %u\n", policy->cpu); + pr_debug("%s: Suspending Governors\n", __func__); + + list_for_each_entry(policy, &cpufreq_policy_list, policy_list) { + if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP)) + pr_err("%s: Failed to stop governor for policy: %p\n", + __func__, policy); + else if (cpufreq_driver->suspend + && cpufreq_driver->suspend(policy)) + pr_err("%s: Failed to suspend driver: %p\n", __func__, + policy); } - cpufreq_cpu_put(policy); - return ret; + cpufreq_suspended = true; } /** - * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU. - * - * 1.) resume CPUfreq hardware support (cpufreq_driver->resume()) - * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are - * restored. It will verify that the current freq is in sync with - * what we believe it to be. This is a bit later than when it - * should be, but nonethteless it's better than calling - * cpufreq_driver->get() here which might re-enable interrupts... + * cpufreq_resume() - Resume CPUFreq governors * - * This function is only executed for the boot CPU. The other CPUs have not - * been turned on yet. + * Called during system-wide suspend/hibernate cycles to resume governors that + * were suspended with cpufreq_suspend(). */ -static void cpufreq_bp_resume(void) +void cpufreq_resume(void) { - int ret = 0; - - int cpu = smp_processor_id(); struct cpufreq_policy *policy; - pr_debug("resuming cpu %u\n", cpu); + if (!cpufreq_driver) + return; - /* If there's no policy for the boot CPU, we have nothing to do.
*/ - policy = cpufreq_cpu_get(cpu); - if (!policy) + if (!has_target()) return; - if (cpufreq_driver->resume) { - ret = cpufreq_driver->resume(policy); - if (ret) { - printk(KERN_ERR "cpufreq: resume failed in ->resume " - "step on CPU %u\n", policy->cpu); - goto fail; - } - } + pr_debug("%s: Resuming Governors\n", __func__); - schedule_work(&policy->update); + cpufreq_suspended = false; -fail: - cpufreq_cpu_put(policy); -} + list_for_each_entry(policy, &cpufreq_policy_list, policy_list) { + if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) + pr_err("%s: Failed to resume driver: %p\n", __func__, + policy); + else if (__cpufreq_governor(policy, CPUFREQ_GOV_START) + || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS)) + pr_err("%s: Failed to start governor for policy: %p\n", + __func__, policy); -static struct syscore_ops cpufreq_syscore_ops = { - .suspend = cpufreq_bp_suspend, - .resume = cpufreq_bp_resume, -}; + /* + * Schedule a call to cpufreq_update_policy() for the boot CPU, + * i.e. the last policy in the list. It will verify that the + * current freq is in sync with what we believe it to be. + */ + if (list_is_last(&policy->policy_list, &cpufreq_policy_list)) + schedule_work(&policy->update); + } } /** * cpufreq_get_current_driver - return current driver's name @@ -1650,12 +1818,92 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier); * GOVERNORS * *********************************************************************/ +/* Must set freqs->new to intermediate frequency */ +static int __target_intermediate(struct cpufreq_policy *policy, + struct cpufreq_freqs *freqs, int index) +{ + int ret; + + freqs->new = cpufreq_driver->get_intermediate(policy, index); + + /* We don't need to switch to intermediate freq */ + if (!freqs->new) + return 0; + + pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n", + __func__, policy->cpu, freqs->old, freqs->new); + + cpufreq_freq_transition_begin(policy, freqs); + ret = cpufreq_driver->target_intermediate(policy, index); + cpufreq_freq_transition_end(policy, freqs, ret); + + if (ret) + pr_err("%s: Failed to change to intermediate frequency: %d\n", + __func__, ret); + + return ret; +} + +static int __target_index(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *freq_table, int index) +{ + struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0}; + unsigned int intermediate_freq = 0; + int retval = -EINVAL; + bool notify; + + notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION); + if (notify) { + /* Handle switching to intermediate frequency */ + if (cpufreq_driver->get_intermediate) { + retval = __target_intermediate(policy, &freqs, index); + if (retval) + return retval; + + intermediate_freq = freqs.new; + /* Set old freq to intermediate */ + if (intermediate_freq) + freqs.old = freqs.new; + } + + freqs.new = freq_table[index].frequency; + pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n", + __func__, policy->cpu, freqs.old, freqs.new); + + cpufreq_freq_transition_begin(policy, &freqs); + } + + retval = cpufreq_driver->target_index(policy, index); + if (retval) + pr_err("%s: Failed to change cpu frequency: %d\n", __func__, + retval); + + if (notify) { + cpufreq_freq_transition_end(policy, &freqs, retval); + + /* + * Failed after setting to intermediate freq? The driver should + * have reverted back to the initial frequency and so should we. + * Check here for intermediate_freq instead of get_intermediate, + * in case we haven't switched to the intermediate freq at all.
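+ * (The revert below notifies a transition from intermediate_freq back to policy->restore_freq.)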
+ */ + if (unlikely(retval && intermediate_freq)) { + freqs.old = intermediate_freq; + freqs.new = policy->restore_freq; + cpufreq_freq_transition_begin(policy, &freqs); + cpufreq_freq_transition_end(policy, &freqs, 0); + } + } + + return retval; +} + int __cpufreq_driver_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { - int retval = -EINVAL; unsigned int old_target_freq = target_freq; + int retval = -EINVAL; if (cpufreq_disabled()) return -ENODEV; @@ -1667,14 +1915,48 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, target_freq = policy->min; pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", - policy->cpu, target_freq, relation, old_target_freq); + policy->cpu, target_freq, relation, old_target_freq); + /* + * This might look like a redundant check, as we compare frequencies + * again after finding the index. But it is left here intentionally for + * the case where exactly the same frequency is requested again, so + * that we can save a few function calls. + */ if (target_freq == policy->cur) return 0; + /* Save the last value to restore later on errors */ + policy->restore_freq = policy->cur; + if (cpufreq_driver->target) retval = cpufreq_driver->target(policy, target_freq, relation); + else if (cpufreq_driver->target_index) { + struct cpufreq_frequency_table *freq_table; + int index; + + freq_table = cpufreq_frequency_get_table(policy->cpu); + if (unlikely(!freq_table)) { + pr_err("%s: Unable to find freq_table\n", __func__); + goto out; + } + + retval = cpufreq_frequency_table_target(policy, freq_table, + target_freq, relation, &index); + if (unlikely(retval)) { + pr_err("%s: Unable to find matching freq\n", __func__); + goto out; + } + + if (freq_table[index].frequency == policy->cur) { + retval = 0; + goto out; + } + retval = __target_index(policy, freq_table, index); + } + +out: return retval; } EXPORT_SYMBOL_GPL(__cpufreq_driver_target); @@ -1685,14 +1967,12 @@ int cpufreq_driver_target(struct cpufreq_policy *policy, { int ret = -EINVAL; - if (unlikely(lock_policy_rwsem_write(policy->cpu))) - goto fail; + down_write(&policy->rwsem); ret = __cpufreq_driver_target(policy, target_freq, relation); - unlock_policy_rwsem_write(policy->cpu); + up_write(&policy->rwsem); -fail: return ret; } EXPORT_SYMBOL_GPL(cpufreq_driver_target); @@ -1716,17 +1996,18 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, struct cpufreq_governor *gov = NULL; #endif + /* Don't start any governor operations if we are entering suspend */ + if (cpufreq_suspended) + return 0; + if (policy->governor->max_transition_latency && policy->cpuinfo.transition_latency > policy->governor->max_transition_latency) { if (!gov) return -EINVAL; else { - printk(KERN_WARNING "%s governor failed, too long" - " transition latency of HW, fallback" - " to %s governor\n", - policy->governor->name, - gov->name); + pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n", + policy->governor->name, gov->name); policy->governor = gov; } } @@ -1736,7 +2017,7 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, return -EINVAL; pr_debug("__cpufreq_governor for CPU %u, event %u\n", - policy->cpu, event); + policy->cpu, event); mutex_lock(&cpufreq_governor_lock); if ((policy->governor_enabled && event == CPUFREQ_GOV_START) @@ -1803,9 +2084,7 @@ EXPORT_SYMBOL_GPL(cpufreq_register_governor); void cpufreq_unregister_governor(struct cpufreq_governor *governor) { -#ifdef CONFIG_HOTPLUG_CPU int cpu; -#endif if (!governor) return; @@ -1813,14 +2092,12 @@
void cpufreq_unregister_governor(struct cpufreq_governor *governor) if (cpufreq_disabled()) return; -#ifdef CONFIG_HOTPLUG_CPU for_each_present_cpu(cpu) { if (cpu_online(cpu)) continue; if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name)) strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0"); } -#endif mutex_lock(&cpufreq_governor_mutex); list_del(&governor->governor_list); @@ -1859,28 +2136,27 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) EXPORT_SYMBOL(cpufreq_get_policy); /* - * data : current policy. - * policy : policy to be set. + * policy : current policy. + * new_policy: policy to be set. */ -static int __cpufreq_set_policy(struct cpufreq_policy *policy, +static int cpufreq_set_policy(struct cpufreq_policy *policy, struct cpufreq_policy *new_policy) { - int ret = 0, failed = 1; + struct cpufreq_governor *old_gov; + int ret; - pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu, - new_policy->min, new_policy->max); + pr_debug("setting new policy for CPU %u: %u - %u kHz\n", + new_policy->cpu, new_policy->min, new_policy->max); memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo)); - if (new_policy->min > policy->max || new_policy->max < policy->min) { - ret = -EINVAL; - goto error_out; - } + if (new_policy->min > policy->max || new_policy->max < policy->min) + return -EINVAL; /* verify the cpu speed can be set within this limit */ ret = cpufreq_driver->verify(new_policy); if (ret) - goto error_out; + return ret; /* adjust if necessary - all reasons */ blocking_notifier_call_chain(&cpufreq_policy_notifier_list, @@ -1896,7 +2172,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *policy, */ ret = cpufreq_driver->verify(new_policy); if (ret) - goto error_out; + return ret; /* notification of the new policy */ blocking_notifier_call_chain(&cpufreq_policy_notifier_list, @@ -1906,63 +2182,53 @@ static int __cpufreq_set_policy(struct cpufreq_policy *policy, policy->max = new_policy->max; pr_debug("new min and max freqs are %u - %u kHz\n", - policy->min, policy->max); + policy->min, policy->max); if (cpufreq_driver->setpolicy) { policy->policy = new_policy->policy; pr_debug("setting range\n"); - ret = cpufreq_driver->setpolicy(new_policy); - } else { - if (new_policy->governor != policy->governor) { - /* save old, working values */ - struct cpufreq_governor *old_gov = policy->governor; - - pr_debug("governor switch\n"); - - /* end old governor */ - if (policy->governor) { - __cpufreq_governor(policy, CPUFREQ_GOV_STOP); - unlock_policy_rwsem_write(new_policy->cpu); - __cpufreq_governor(policy, - CPUFREQ_GOV_POLICY_EXIT); - lock_policy_rwsem_write(new_policy->cpu); - } + return cpufreq_driver->setpolicy(new_policy); + } - /* start new governor */ - policy->governor = new_policy->governor; - if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) { - if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) { - failed = 0; - } else { - unlock_policy_rwsem_write(new_policy->cpu); - __cpufreq_governor(policy, - CPUFREQ_GOV_POLICY_EXIT); - lock_policy_rwsem_write(new_policy->cpu); - } - } + if (new_policy->governor == policy->governor) + goto out; - if (failed) { - /* new governor failed, so re-start old one */ - pr_debug("starting governor %s failed\n", - policy->governor->name); - if (old_gov) { - policy->governor = old_gov; - __cpufreq_governor(policy, - CPUFREQ_GOV_POLICY_INIT); - __cpufreq_governor(policy, - CPUFREQ_GOV_START); - } - ret = -EINVAL; - goto error_out; - } - /* might be a policy change, too, so fall 
through */ - } - pr_debug("governor: change or update limits\n"); - ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); + pr_debug("governor switch\n"); + + /* save old, working values */ + old_gov = policy->governor; + /* end old governor */ + if (old_gov) { + __cpufreq_governor(policy, CPUFREQ_GOV_STOP); + up_write(&policy->rwsem); + __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); + down_write(&policy->rwsem); } -error_out: - return ret; + /* start new governor */ + policy->governor = new_policy->governor; + if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) { + if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) + goto out; + + up_write(&policy->rwsem); + __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); + down_write(&policy->rwsem); + } + + /* new governor failed, so re-start old one */ + pr_debug("starting governor %s failed\n", policy->governor->name); + if (old_gov) { + policy->governor = old_gov; + __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT); + __cpufreq_governor(policy, CPUFREQ_GOV_START); + } + + return -EINVAL; + + out: + pr_debug("governor: change or update limits\n"); + return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); } /** @@ -1978,15 +2244,10 @@ int cpufreq_update_policy(unsigned int cpu) struct cpufreq_policy new_policy; int ret; - if (!policy) { - ret = -ENODEV; - goto no_policy; - } + if (!policy) + return -ENODEV; - if (unlikely(lock_policy_rwsem_write(cpu))) { - ret = -EINVAL; - goto fail; - } + down_write(&policy->rwsem); pr_debug("updating policy for CPU %u\n", cpu); memcpy(&new_policy, policy, sizeof(*policy)); @@ -1999,25 +2260,29 @@ int cpufreq_update_policy(unsigned int cpu) * BIOS might change freq behind our back * -> ask driver for current freq and notify governors about a change */ - if (cpufreq_driver->get) { + if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { new_policy.cur = cpufreq_driver->get(cpu); + if (WARN_ON(!new_policy.cur)) { + ret = -EIO; + goto unlock; + } + if (!policy->cur) { - pr_debug("Driver did not initialize current freq"); + pr_debug("Driver did not initialize current freq\n"); policy->cur = new_policy.cur; } else { - if (policy->cur != new_policy.cur && cpufreq_driver->target) + if (policy->cur != new_policy.cur && has_target()) cpufreq_out_of_sync(cpu, policy->cur, new_policy.cur); } } - ret = __cpufreq_set_policy(policy, &new_policy); + ret = cpufreq_set_policy(policy, &new_policy); - unlock_policy_rwsem_write(cpu); +unlock: + up_write(&policy->rwsem); -fail: cpufreq_cpu_put(policy); -no_policy: return ret; } EXPORT_SYMBOL(cpufreq_update_policy); @@ -2027,30 +2292,24 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb, { unsigned int cpu = (unsigned long)hcpu; struct device *dev; - bool frozen = false; dev = get_cpu_device(cpu); if (dev) { - - if (action & CPU_TASKS_FROZEN) - frozen = true; - switch (action & ~CPU_TASKS_FROZEN) { case CPU_ONLINE: - __cpufreq_add_dev(dev, NULL, frozen); - cpufreq_update_policy(cpu); + __cpufreq_add_dev(dev, NULL); break; case CPU_DOWN_PREPARE: - __cpufreq_remove_dev_prepare(dev, NULL, frozen); + __cpufreq_remove_dev_prepare(dev, NULL); break; case CPU_POST_DEAD: - __cpufreq_remove_dev_finish(dev, NULL, frozen); + __cpufreq_remove_dev_finish(dev, NULL); break; case CPU_DOWN_FAILED: - __cpufreq_add_dev(dev, NULL, frozen); + __cpufreq_add_dev(dev, NULL); break; } } @@ -2062,6 +2321,73 @@ static struct notifier_block __refdata cpufreq_cpu_notifier = { }; /********************************************************************* + * BOOST * + 
*********************************************************************/ +static int cpufreq_boost_set_sw(int state) +{ + struct cpufreq_frequency_table *freq_table; + struct cpufreq_policy *policy; + int ret = -EINVAL; + + list_for_each_entry(policy, &cpufreq_policy_list, policy_list) { + freq_table = cpufreq_frequency_get_table(policy->cpu); + if (freq_table) { + ret = cpufreq_frequency_table_cpuinfo(policy, + freq_table); + if (ret) { + pr_err("%s: Policy frequency update failed\n", + __func__); + break; + } + policy->user_policy.max = policy->max; + __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); + } + } + + return ret; +} + +int cpufreq_boost_trigger_state(int state) +{ + unsigned long flags; + int ret = 0; + + if (cpufreq_driver->boost_enabled == state) + return 0; + + write_lock_irqsave(&cpufreq_driver_lock, flags); + cpufreq_driver->boost_enabled = state; + write_unlock_irqrestore(&cpufreq_driver_lock, flags); + + ret = cpufreq_driver->set_boost(state); + if (ret) { + write_lock_irqsave(&cpufreq_driver_lock, flags); + cpufreq_driver->boost_enabled = !state; + write_unlock_irqrestore(&cpufreq_driver_lock, flags); + + pr_err("%s: Cannot %s BOOST\n", + __func__, state ? "enable" : "disable"); + } + + return ret; +} + +int cpufreq_boost_supported(void) +{ + if (likely(cpufreq_driver)) + return cpufreq_driver->boost_supported; + + return 0; +} +EXPORT_SYMBOL_GPL(cpufreq_boost_supported); + +int cpufreq_boost_enabled(void) +{ + return cpufreq_driver->boost_enabled; +} +EXPORT_SYMBOL_GPL(cpufreq_boost_enabled); + +/********************************************************************* * REGISTER / UNREGISTER CPUFREQ DRIVER * *********************************************************************/ @@ -2084,7 +2410,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) return -ENODEV; if (!driver_data || !driver_data->verify || !driver_data->init || - ((!driver_data->setpolicy) && (!driver_data->target))) + !(driver_data->setpolicy || driver_data->target_index || + driver_data->target) || + (driver_data->setpolicy && (driver_data->target_index || + driver_data->target)) || + (!!driver_data->get_intermediate != !!driver_data->target_intermediate)) return -EINVAL; pr_debug("trying to register driver %s\n", driver_data->name); @@ -2095,14 +2425,30 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) write_lock_irqsave(&cpufreq_driver_lock, flags); if (cpufreq_driver) { write_unlock_irqrestore(&cpufreq_driver_lock, flags); - return -EBUSY; + return -EEXIST; } cpufreq_driver = driver_data; write_unlock_irqrestore(&cpufreq_driver_lock, flags); + if (cpufreq_boost_supported()) { + /* + * Check if driver provides function to enable boost - + * if not, use cpufreq_boost_set_sw as default + */ + if (!cpufreq_driver->set_boost) + cpufreq_driver->set_boost = cpufreq_boost_set_sw; + + ret = cpufreq_sysfs_create_file(&boost.attr); + if (ret) { + pr_err("%s: cannot register global BOOST sysfs file\n", + __func__); + goto err_null_driver; + } + } + ret = subsys_interface_register(&cpufreq_interface); if (ret) - goto err_null_driver; + goto err_boost_unreg; if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) { int i; @@ -2118,7 +2464,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) /* if all ->init() calls failed, unregister */ if (ret) { pr_debug("no CPU initialized for driver %s\n", - driver_data->name); + driver_data->name); goto err_if_unreg; } } @@ -2129,6 +2475,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) return 0; err_if_unreg: 
subsys_interface_unregister(&cpufreq_interface); +err_boost_unreg: + if (cpufreq_boost_supported()) + cpufreq_sysfs_remove_file(&boost.attr); err_null_driver: write_lock_irqsave(&cpufreq_driver_lock, flags); cpufreq_driver = NULL; @@ -2155,6 +2504,9 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver) pr_debug("unregistering driver %s\n", driver->name); subsys_interface_unregister(&cpufreq_interface); + if (cpufreq_boost_supported()) + cpufreq_sysfs_remove_file(&boost.attr); + unregister_hotcpu_notifier(&cpufreq_cpu_notifier); down_write(&cpufreq_rwsem); @@ -2171,17 +2523,11 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); static int __init cpufreq_core_init(void) { - int cpu; - if (cpufreq_disabled()) return -ENODEV; - for_each_possible_cpu(cpu) - init_rwsem(&per_cpu(cpu_policy_rwsem, cpu)); - cpufreq_global_kobject = kobject_create(); BUG_ON(!cpufreq_global_kobject); - register_syscore_ops(&cpufreq_syscore_ops); return 0; } diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index f62d822048e..25a70d06c5b 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -68,6 +68,9 @@ static void cs_check_cpu(int cpu, unsigned int load) dbs_info->requested_freq += get_freq_target(cs_tuners, policy); + if (dbs_info->requested_freq > policy->max) + dbs_info->requested_freq = policy->max; + __cpufreq_driver_target(policy, dbs_info->requested_freq, CPUFREQ_RELATION_H); return; @@ -80,13 +83,18 @@ static void cs_check_cpu(int cpu, unsigned int load) /* Check for frequency decrease */ if (load < cs_tuners->down_threshold) { + unsigned int freq_target; /* * if we cannot reduce the frequency anymore, break out early */ if (policy->cur == policy->min) return; - dbs_info->requested_freq -= get_freq_target(cs_tuners, policy); + freq_target = get_freq_target(cs_tuners, policy); + if (dbs_info->requested_freq > freq_target) + dbs_info->requested_freq -= freq_target; + else + dbs_info->requested_freq = policy->min; __cpufreq_driver_target(policy, dbs_info->requested_freq, CPUFREQ_RELATION_L); diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 0806c31e576..1b44496b2d2 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -36,14 +36,29 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) struct od_dbs_tuners *od_tuners = dbs_data->tuners; struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; struct cpufreq_policy *policy; + unsigned int sampling_rate; unsigned int max_load = 0; unsigned int ignore_nice; unsigned int j; - if (dbs_data->cdata->governor == GOV_ONDEMAND) + if (dbs_data->cdata->governor == GOV_ONDEMAND) { + struct od_cpu_dbs_info_s *od_dbs_info = + dbs_data->cdata->get_cpu_dbs_info_s(cpu); + + /* + * Sometimes, the ondemand governor uses an additional + * multiplier to give long delays. So apply this multiplier to + * the 'sampling_rate', so as to keep the wake-up-from-idle + * detection logic a bit conservative. 
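+ * (e.g. with rate_mult == 4, an idle gap only counts as "long" in the check below once wall_time exceeds eight times the base sampling rate)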
+ */ + sampling_rate = od_tuners->sampling_rate; + sampling_rate *= od_dbs_info->rate_mult; + ignore_nice = od_tuners->ignore_nice_load; - else + } else { + sampling_rate = cs_tuners->sampling_rate; ignore_nice = cs_tuners->ignore_nice_load; + } policy = cdbs->cur_policy; @@ -96,7 +111,46 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) if (unlikely(!wall_time || wall_time < idle_time)) continue; - load = 100 * (wall_time - idle_time) / wall_time; + /* + * If the CPU had gone completely idle, and a task just woke up + * on this CPU now, it would be unfair to calculate 'load' the + * usual way for this elapsed time-window, because it will show + * near-zero load, irrespective of how CPU intensive that task + * actually is. This is undesirable for latency-sensitive bursty + * workloads. + * + * To avoid this, we reuse the 'load' from the previous + * time-window and give this task a chance to start with a + * reasonably high CPU frequency. (However, we shouldn't over-do + * this copy, lest we get stuck at a high load (high frequency) + * for too long, even when the current system load has actually + * dropped down. So we perform the copy only once, upon the + * first wake-up from idle.) + * + * Detecting this situation is easy: the governor's deferrable + * timer would not have fired during CPU-idle periods. Hence + * an unusually large 'wall_time' (as compared to the sampling + * rate) indicates this scenario. + * + * prev_load can be zero in two cases and we must recalculate it + * for both cases: + * - during long idle intervals + * - explicitly set to zero + */ + if (unlikely(wall_time > (2 * sampling_rate) && + j_cdbs->prev_load)) { + load = j_cdbs->prev_load; + + /* + * Perform a destructive copy, to ensure that we copy + * the previous load only once, upon the first wake-up + * from idle. 
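The long comment above describes the wake-from-idle heuristic; reduced to a pure function it looks roughly like this (a sketch, not the patch itself; the kernel version additionally handles nice-time accounting, and the caller has already rejected wall_time == 0 and wall_time < idle_time):

static unsigned int estimate_load(unsigned int wall_time,
				  unsigned int idle_time,
				  unsigned int sampling_rate,
				  unsigned int *prev_load)
{
	unsigned int load;

	/*
	 * A window much longer than the sampling rate means the deferrable
	 * timer slept through an idle period; reuse the previous load once
	 * so a freshly woken task is not judged by a near-zero sample.
	 */
	if (wall_time > 2 * sampling_rate && *prev_load) {
		load = *prev_load;
		*prev_load = 0;	/* destructive copy: reused only once */
	} else {
		load = 100 * (wall_time - idle_time) / wall_time;
		*prev_load = load;
	}

	return load;
}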
+ */ + j_cdbs->prev_load = 0; + } else { + load = 100 * (wall_time - idle_time) / wall_time; + j_cdbs->prev_load = load; + } if (load > max_load) max_load = load; @@ -119,8 +173,9 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy, { int i; + mutex_lock(&cpufreq_governor_lock); if (!policy->governor_enabled) - return; + goto out_unlock; if (!all_cpus) { /* @@ -135,6 +190,9 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy, for_each_cpu(i, policy->cpus) __gov_queue_work(i, dbs_data, delay); } + +out_unlock: + mutex_unlock(&cpufreq_governor_lock); } EXPORT_SYMBOL_GPL(gov_queue_work); @@ -314,11 +372,18 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, for_each_cpu(j, policy->cpus) { struct cpu_dbs_common_info *j_cdbs = dbs_data->cdata->get_cpu_cdbs(j); + unsigned int prev_load; j_cdbs->cpu = j; j_cdbs->cur_policy = policy; j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy); + + prev_load = (unsigned int) + (j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle); + j_cdbs->prev_load = 100 * prev_load / + (unsigned int) j_cdbs->prev_cpu_wall; + if (ignore_nice) j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; @@ -328,10 +393,6 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, dbs_data->cdata->gov_dbs_timer); } - /* - * conservative does not implement micro like ondemand - * governor, thus we are bound to jiffes/HZ - */ if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { cs_dbs_info->down_skip = 0; cs_dbs_info->enable = 1; @@ -366,6 +427,11 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, break; case CPUFREQ_GOV_LIMITS: + mutex_lock(&dbs_data->mutex); + if (!cpu_cdbs->cur_policy) { + mutex_unlock(&dbs_data->mutex); + break; + } mutex_lock(&cpu_cdbs->timer_mutex); if (policy->max < cpu_cdbs->cur_policy->cur) __cpufreq_driver_target(cpu_cdbs->cur_policy, @@ -375,6 +441,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, policy->min, CPUFREQ_RELATION_L); dbs_check_cpu(dbs_data, cpu); mutex_unlock(&cpu_cdbs->timer_mutex); + mutex_unlock(&dbs_data->mutex); break; } return 0; diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index 88cd39f7b0e..cc401d147e7 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h @@ -134,6 +134,13 @@ struct cpu_dbs_common_info { u64 prev_cpu_idle; u64 prev_cpu_wall; u64 prev_cpu_nice; + /* + * Used to keep track of load in the previous interval. However, when + * explicitly set to zero, it is used as a flag to ensure that we copy + * the previous load to the current interval only once, upon the first + * wake-up from idle. 
+ */ + unsigned int prev_load; struct cpufreq_policy *cur_policy; struct delayed_work work; /* @@ -191,7 +198,10 @@ struct common_dbs_data { struct attribute_group *attr_group_gov_sys; /* one governor - system */ struct attribute_group *attr_group_gov_pol; /* one governor - policy */ - /* Common data for platforms that don't set have_governor_per_policy */ + /* + * Common data for platforms that don't set + * CPUFREQ_HAVE_GOVERNOR_PER_POLICY + */ struct dbs_data *gdbs_data; struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu); @@ -254,6 +264,8 @@ static ssize_t show_sampling_rate_min_gov_pol \ return sprintf(buf, "%u\n", dbs_data->min_sampling_rate); \ } +extern struct mutex cpufreq_governor_lock; + void dbs_check_cpu(struct dbs_data *dbs_data, int cpu); bool need_load_eval(struct cpu_dbs_common_info *cdbs, unsigned int sampling_rate); diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 32f26f6e17c..18d40918909 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -168,7 +168,6 @@ static void od_check_cpu(int cpu, unsigned int load) dbs_info->rate_mult = od_tuners->sampling_down_factor; dbs_freq_increase(policy, policy->max); - return; } else { /* Calculate the next frequency proportional to load */ unsigned int freq_next; diff --git a/drivers/cpufreq/cpufreq_opp.c b/drivers/cpufreq/cpufreq_opp.c new file mode 100644 index 00000000000..c0c6f4a4ecc --- /dev/null +++ b/drivers/cpufreq/cpufreq_opp.c @@ -0,0 +1,110 @@ +/* + * Generic OPP helper interface for CPUFreq drivers + * + * Copyright (C) 2009-2014 Texas Instruments Incorporated. + * Nishanth Menon + * Romit Dasgupta + * Kevin Hilman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/cpufreq.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/export.h> +#include <linux/kernel.h> +#include <linux/pm_opp.h> +#include <linux/rcupdate.h> +#include <linux/slab.h> + +/** + * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device + * @dev: device for which we do this operation + * @table: cpufreq table returned to the caller + * + * Generate a cpufreq table for a provided device - this assumes that the + * opp list is already initialized and ready for usage. + * + * This function allocates required memory for the cpufreq table. It is + * expected that the caller does the required maintenance such as freeing + * the table when it is no longer needed. + * + * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM + * if no memory is available for the operation (table is not populated), and 0 + * on success (table is populated). + * + * WARNING: It is important for the callers to ensure refreshing their copy of + * the table if any of the mentioned functions have been invoked in the interim. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Since we just use the regular accessor functions to access the internal data + * structures, we use RCU read lock inside this function. As a result, users of + * this function DO NOT need to use explicit locks for invoking.
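A typical use of the two helpers added in cpufreq_opp.c from a driver's ->init() path might look as follows (a hedged sketch; the foo_ names are placeholders, and cpufreq_table_validate_and_show() is the table hookup used elsewhere in this series):

#include <linux/cpufreq.h>
#include <linux/pm_opp.h>

static int foo_cpufreq_init(struct cpufreq_policy *policy,
			    struct device *cpu_dev)
{
	struct cpufreq_frequency_table *table;
	int ret;

	/* build a table from the device's OPP list; error codes as documented above */
	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &table);
	if (ret)
		return ret;

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret)
		dev_pm_opp_free_cpufreq_table(cpu_dev, &table);

	return ret;
}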
+ */ +int dev_pm_opp_init_cpufreq_table(struct device *dev, + struct cpufreq_frequency_table **table) +{ + struct dev_pm_opp *opp; + struct cpufreq_frequency_table *freq_table = NULL; + int i, max_opps, ret = 0; + unsigned long rate; + + rcu_read_lock(); + + max_opps = dev_pm_opp_get_opp_count(dev); + if (max_opps <= 0) { + ret = max_opps ? max_opps : -ENODATA; + goto out; + } + + freq_table = kzalloc(sizeof(*freq_table) * (max_opps + 1), GFP_KERNEL); + if (!freq_table) { + ret = -ENOMEM; + goto out; + } + + for (i = 0, rate = 0; i < max_opps; i++, rate++) { + /* find next rate */ + opp = dev_pm_opp_find_freq_ceil(dev, &rate); + if (IS_ERR(opp)) { + ret = PTR_ERR(opp); + goto out; + } + freq_table[i].driver_data = i; + freq_table[i].frequency = rate / 1000; + } + + freq_table[i].driver_data = i; + freq_table[i].frequency = CPUFREQ_TABLE_END; + + *table = &freq_table[0]; + +out: + rcu_read_unlock(); + if (ret) + kfree(freq_table); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table); + +/** + * dev_pm_opp_free_cpufreq_table() - free the cpufreq table + * @dev: device for which we do this operation + * @table: table to free + * + * Free up the table allocated by dev_pm_opp_init_cpufreq_table + */ +void dev_pm_opp_free_cpufreq_table(struct device *dev, + struct cpufreq_frequency_table **table) +{ + if (!table) + return; + + kfree(*table); + *table = NULL; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table); diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index 4cf0d2805cb..0cd9b4dcef9 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c @@ -13,7 +13,7 @@ #include <linux/cpufreq.h> #include <linux/module.h> #include <linux/slab.h> -#include <asm/cputime.h> +#include <linux/cputime.h> static spinlock_t cpufreq_stats_lock; @@ -151,76 +151,62 @@ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq) return -1; } -/* should be called late in the CPU removal sequence so that the stats - * memory is still available in case someone tries to use it. - */ -static void cpufreq_stats_free_table(unsigned int cpu) +static void __cpufreq_stats_free_table(struct cpufreq_policy *policy) { - struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu); + struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu); - if (stat) { - pr_debug("%s: Free stat table\n", __func__); - kfree(stat->time_in_state); - kfree(stat); - per_cpu(cpufreq_stats_table, cpu) = NULL; - } + if (!stat) + return; + + pr_debug("%s: Free stat table\n", __func__); + + sysfs_remove_group(&policy->kobj, &stats_attr_group); + kfree(stat->time_in_state); + kfree(stat); + per_cpu(cpufreq_stats_table, policy->cpu) = NULL; } -/* must be called early in the CPU removal sequence (before - * cpufreq_remove_dev) so that policy is still valid. 
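The cpufreq_stats rework below drops the open-coded table walks in favour of the cpufreq_for_each_valid_entry() iterator. For reference, counting the usable rows of a table reduces to this (sketch):

#include <linux/cpufreq.h>

static unsigned int count_valid_freqs(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
	unsigned int count = 0;

	/* skips CPUFREQ_ENTRY_INVALID rows, stops at CPUFREQ_TABLE_END */
	cpufreq_for_each_valid_entry(pos, table)
		count++;

	return count;
}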
- */ -static void cpufreq_stats_free_sysfs(unsigned int cpu) +static void cpufreq_stats_free_table(unsigned int cpu) { - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); + struct cpufreq_policy *policy; + policy = cpufreq_cpu_get(cpu); if (!policy) return; - if (!cpufreq_frequency_get_table(cpu)) - goto put_ref; - - if (!policy_is_shared(policy)) { - pr_debug("%s: Free sysfs stat\n", __func__); - sysfs_remove_group(&policy->kobj, &stats_attr_group); - } + if (cpufreq_frequency_get_table(policy->cpu)) + __cpufreq_stats_free_table(policy); -put_ref: cpufreq_cpu_put(policy); } -static int cpufreq_stats_create_table(struct cpufreq_policy *policy, - struct cpufreq_frequency_table *table) +static int __cpufreq_stats_create_table(struct cpufreq_policy *policy) { - unsigned int i, j, count = 0, ret = 0; + unsigned int i, count = 0, ret = 0; struct cpufreq_stats *stat; - struct cpufreq_policy *current_policy; unsigned int alloc_size; unsigned int cpu = policy->cpu; + struct cpufreq_frequency_table *pos, *table; + + table = cpufreq_frequency_get_table(cpu); + if (unlikely(!table)) + return 0; + if (per_cpu(cpufreq_stats_table, cpu)) return -EBUSY; stat = kzalloc(sizeof(*stat), GFP_KERNEL); if ((stat) == NULL) return -ENOMEM; - current_policy = cpufreq_cpu_get(cpu); - if (current_policy == NULL) { - ret = -EINVAL; - goto error_get_fail; - } - - ret = sysfs_create_group(¤t_policy->kobj, &stats_attr_group); + ret = sysfs_create_group(&policy->kobj, &stats_attr_group); if (ret) goto error_out; stat->cpu = cpu; per_cpu(cpufreq_stats_table, cpu) = stat; - for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { - unsigned int freq = table[i].frequency; - if (freq == CPUFREQ_ENTRY_INVALID) - continue; + cpufreq_for_each_valid_entry(pos, table) count++; - } alloc_size = count * sizeof(int) + count * sizeof(u64); @@ -231,36 +217,48 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy, stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL); if (!stat->time_in_state) { ret = -ENOMEM; - goto error_out; + goto error_alloc; } stat->freq_table = (unsigned int *)(stat->time_in_state + count); #ifdef CONFIG_CPU_FREQ_STAT_DETAILS stat->trans_table = stat->freq_table + count; #endif - j = 0; - for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { - unsigned int freq = table[i].frequency; - if (freq == CPUFREQ_ENTRY_INVALID) - continue; - if (freq_table_get_index(stat, freq) == -1) - stat->freq_table[j++] = freq; - } - stat->state_num = j; + i = 0; + cpufreq_for_each_valid_entry(pos, table) + if (freq_table_get_index(stat, pos->frequency) == -1) + stat->freq_table[i++] = pos->frequency; + stat->state_num = i; spin_lock(&cpufreq_stats_lock); stat->last_time = get_jiffies_64(); stat->last_index = freq_table_get_index(stat, policy->cur); spin_unlock(&cpufreq_stats_lock); - cpufreq_cpu_put(current_policy); return 0; +error_alloc: + sysfs_remove_group(&policy->kobj, &stats_attr_group); error_out: - cpufreq_cpu_put(current_policy); -error_get_fail: kfree(stat); per_cpu(cpufreq_stats_table, cpu) = NULL; return ret; } +static void cpufreq_stats_create_table(unsigned int cpu) +{ + struct cpufreq_policy *policy; + + /* + * "likely(!policy)" because normally cpufreq_stats will be registered + * before cpufreq driver + */ + policy = cpufreq_cpu_get(cpu); + if (likely(!policy)) + return; + + __cpufreq_stats_create_table(policy); + + cpufreq_cpu_put(policy); +} + static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy) { struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, @@ 
-277,25 +275,20 @@ static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy) static int cpufreq_stat_notifier_policy(struct notifier_block *nb, unsigned long val, void *data) { - int ret; + int ret = 0; struct cpufreq_policy *policy = data; - struct cpufreq_frequency_table *table; - unsigned int cpu = policy->cpu; if (val == CPUFREQ_UPDATE_POLICY_CPU) { cpufreq_stats_update_policy_cpu(policy); return 0; } - if (val != CPUFREQ_NOTIFY) - return 0; - table = cpufreq_frequency_get_table(cpu); - if (!table) - return 0; - ret = cpufreq_stats_create_table(policy, table); - if (ret) - return ret; - return 0; + if (val == CPUFREQ_CREATE_POLICY) + ret = __cpufreq_stats_create_table(policy); + else if (val == CPUFREQ_REMOVE_POLICY) + __cpufreq_stats_free_table(policy); + + return ret; } static int cpufreq_stat_notifier_trans(struct notifier_block *nb, @@ -334,29 +327,6 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb, return 0; } -static int cpufreq_stat_cpu_callback(struct notifier_block *nfb, - unsigned long action, - void *hcpu) -{ - unsigned int cpu = (unsigned long)hcpu; - - switch (action) { - case CPU_DOWN_PREPARE: - cpufreq_stats_free_sysfs(cpu); - break; - case CPU_DEAD: - cpufreq_stats_free_table(cpu); - break; - } - return NOTIFY_OK; -} - -/* priority=1 so this will get called before cpufreq_remove_dev */ -static struct notifier_block cpufreq_stat_cpu_notifier __refdata = { - .notifier_call = cpufreq_stat_cpu_callback, - .priority = 1, -}; - static struct notifier_block notifier_policy_block = { .notifier_call = cpufreq_stat_notifier_policy }; @@ -376,14 +346,14 @@ static int __init cpufreq_stats_init(void) if (ret) return ret; - register_hotcpu_notifier(&cpufreq_stat_cpu_notifier); + for_each_online_cpu(cpu) + cpufreq_stats_create_table(cpu); ret = cpufreq_register_notifier(¬ifier_trans_block, CPUFREQ_TRANSITION_NOTIFIER); if (ret) { cpufreq_unregister_notifier(¬ifier_policy_block, CPUFREQ_POLICY_NOTIFIER); - unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier); for_each_online_cpu(cpu) cpufreq_stats_free_table(cpu); return ret; @@ -399,11 +369,8 @@ static void __exit cpufreq_stats_exit(void) CPUFREQ_POLICY_NOTIFIER); cpufreq_unregister_notifier(¬ifier_trans_block, CPUFREQ_TRANSITION_NOTIFIER); - unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier); - for_each_online_cpu(cpu) { + for_each_online_cpu(cpu) cpufreq_stats_free_table(cpu); - cpufreq_stats_free_sysfs(cpu); - } } MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>"); diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c index 03078090b5f..4dbf1db16ac 100644 --- a/drivers/cpufreq/cpufreq_userspace.c +++ b/drivers/cpufreq/cpufreq_userspace.c @@ -38,18 +38,7 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq) if (!per_cpu(cpu_is_managed, policy->cpu)) goto err; - /* - * We're safe from concurrent calls to ->target() here - * as we hold the userspace_mutex lock. 
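With the hotplug callback gone, the stats lifetime is driven entirely by the policy notifier, as in cpufreq_stat_notifier_policy() above. The general shape for any code tracking policy creation and removal (a sketch; the foo_ functions are placeholder stubs):

#include <linux/cpufreq.h>

static int foo_alloc_state(struct cpufreq_policy *policy) { return 0; }
static void foo_free_state(struct cpufreq_policy *policy) { }

static int foo_policy_notifier(struct notifier_block *nb,
			       unsigned long val, void *data)
{
	struct cpufreq_policy *policy = data;

	if (val == CPUFREQ_CREATE_POLICY)
		return foo_alloc_state(policy);	/* sysfs group, counters, ... */
	else if (val == CPUFREQ_REMOVE_POLICY)
		foo_free_state(policy);		/* tear down in reverse order */

	return 0;
}

Such a block is registered with cpufreq_register_notifier() and CPUFREQ_POLICY_NOTIFIER, exactly as cpufreq_stats_init() does with notifier_policy_block.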
If we were calling - * cpufreq_driver_target, a deadlock situation might occur: - * A: cpufreq_set (lock userspace_mutex) -> - * cpufreq_driver_target(lock policy->lock) - * B: cpufreq_set_policy(lock policy->lock) -> - * __cpufreq_governor -> - * cpufreq_governor_userspace (lock userspace_mutex) - */ ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L); - err: mutex_unlock(&userspace_mutex); return ret; diff --git a/drivers/cpufreq/cris-artpec3-cpufreq.c b/drivers/cpufreq/cris-artpec3-cpufreq.c index cb8276dd19c..601b88c490c 100644 --- a/drivers/cpufreq/cris-artpec3-cpufreq.c +++ b/drivers/cpufreq/cris-artpec3-cpufreq.c @@ -15,9 +15,9 @@ static struct notifier_block cris_sdram_freq_notifier_block = { }; static struct cpufreq_frequency_table cris_freq_table[] = { - {0x01, 6000}, - {0x02, 200000}, - {0, CPUFREQ_TABLE_END}, + {0, 0x01, 6000}, + {0, 0x02, 200000}, + {0, 0, CPUFREQ_TABLE_END}, }; static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu) @@ -27,18 +27,11 @@ static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu) return clk_ctrl.pll ? 200000 : 6000; } -static void cris_freq_set_cpu_state(struct cpufreq_policy *policy, - unsigned int state) +static int cris_freq_target(struct cpufreq_policy *policy, unsigned int state) { - struct cpufreq_freqs freqs; reg_clkgen_rw_clk_ctrl clk_ctrl; clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl); - freqs.old = cris_freq_get_cpu_frequency(policy->cpu); - freqs.new = cris_freq_table[state].frequency; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - local_irq_disable(); /* Even though we may be SMP they will share the same clock @@ -51,67 +44,21 @@ static void cris_freq_set_cpu_state(struct cpufreq_policy *policy, local_irq_enable(); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); -}; - -static int cris_freq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]); -} - -static int cris_freq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int newstate = 0; - - if (cpufreq_frequency_table_target(policy, cris_freq_table, - target_freq, relation, &newstate)) - return -EINVAL; - - cris_freq_set_cpu_state(policy, newstate); - return 0; } static int cris_freq_cpu_init(struct cpufreq_policy *policy) { - int result; - - /* cpuinfo and default policy values */ - policy->cpuinfo.transition_latency = 1000000; /* 1ms */ - policy->cur = cris_freq_get_cpu_frequency(0); - - result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table); - if (result) - return (result); - - cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu); - - return 0; -} - - -static int cris_freq_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; + return cpufreq_generic_init(policy, cris_freq_table, 1000000); } - -static struct freq_attr *cris_freq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver cris_freq_driver = { .get = cris_freq_get_cpu_frequency, - .verify = cris_freq_verify, - .target = cris_freq_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = cris_freq_target, .init = cris_freq_cpu_init, - .exit = cris_freq_cpu_exit, .name = "cris_freq", - .attr = cris_freq_attr, + .attr = cpufreq_generic_attr, }; static int __init cris_freq_init(void) diff --git a/drivers/cpufreq/cris-etraxfs-cpufreq.c b/drivers/cpufreq/cris-etraxfs-cpufreq.c index 72328f77dc5..22b2cdde74d 
100644 --- a/drivers/cpufreq/cris-etraxfs-cpufreq.c +++ b/drivers/cpufreq/cris-etraxfs-cpufreq.c @@ -15,9 +15,9 @@ static struct notifier_block cris_sdram_freq_notifier_block = { }; static struct cpufreq_frequency_table cris_freq_table[] = { - {0x01, 6000}, - {0x02, 200000}, - {0, CPUFREQ_TABLE_END}, + {0, 0x01, 6000}, + {0, 0x02, 200000}, + {0, 0, CPUFREQ_TABLE_END}, }; static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu) @@ -27,18 +27,11 @@ static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu) return clk_ctrl.pll ? 200000 : 6000; } -static void cris_freq_set_cpu_state(struct cpufreq_policy *policy, - unsigned int state) +static int cris_freq_target(struct cpufreq_policy *policy, unsigned int state) { - struct cpufreq_freqs freqs; reg_config_rw_clk_ctrl clk_ctrl; clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl); - freqs.old = cris_freq_get_cpu_frequency(policy->cpu); - freqs.new = cris_freq_table[state].frequency; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - local_irq_disable(); /* Even though we may be SMP they will share the same clock @@ -51,64 +44,21 @@ static void cris_freq_set_cpu_state(struct cpufreq_policy *policy, local_irq_enable(); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); -}; - -static int cris_freq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]); -} - -static int cris_freq_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) -{ - unsigned int newstate = 0; - - if (cpufreq_frequency_table_target - (policy, cris_freq_table, target_freq, relation, &newstate)) - return -EINVAL; - - cris_freq_set_cpu_state(policy, newstate); - return 0; } static int cris_freq_cpu_init(struct cpufreq_policy *policy) { - int result; - - /* cpuinfo and default policy values */ - policy->cpuinfo.transition_latency = 1000000; /* 1ms */ - policy->cur = cris_freq_get_cpu_frequency(0); - - result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table); - if (result) - return (result); - - cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu); - - return 0; + return cpufreq_generic_init(policy, cris_freq_table, 1000000); } -static int cris_freq_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; -} - -static struct freq_attr *cris_freq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver cris_freq_driver = { .get = cris_freq_get_cpu_frequency, - .verify = cris_freq_verify, - .target = cris_freq_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = cris_freq_target, .init = cris_freq_cpu_init, - .exit = cris_freq_cpu_exit, .name = "cris_freq", - .attr = cris_freq_attr, + .attr = cpufreq_generic_attr, }; static int __init cris_freq_init(void) diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c index 551dd655c6f..28a16dc6e02 100644 --- a/drivers/cpufreq/davinci-cpufreq.c +++ b/drivers/cpufreq/davinci-cpufreq.c @@ -50,9 +50,7 @@ static int davinci_verify_speed(struct cpufreq_policy *policy) if (policy->cpu) return -EINVAL; - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); - + cpufreq_verify_within_cpu_limits(policy); policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000; policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000; cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, @@ -60,66 +58,38 @@ static 
int davinci_verify_speed(struct cpufreq_policy *policy) return 0; } -static unsigned int davinci_getspeed(unsigned int cpu) -{ - if (cpu) - return 0; - - return clk_get_rate(cpufreq.armclk) / 1000; -} - -static int davinci_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) +static int davinci_target(struct cpufreq_policy *policy, unsigned int idx) { - int ret = 0; - unsigned int idx; - struct cpufreq_freqs freqs; struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data; struct clk *armclk = cpufreq.armclk; + unsigned int old_freq, new_freq; + int ret = 0; - freqs.old = davinci_getspeed(0); - freqs.new = clk_round_rate(armclk, target_freq * 1000) / 1000; - - if (freqs.old == freqs.new) - return ret; - - dev_dbg(cpufreq.dev, "transition: %u --> %u\n", freqs.old, freqs.new); - - ret = cpufreq_frequency_table_target(policy, pdata->freq_table, - freqs.new, relation, &idx); - if (ret) - return -EINVAL; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + old_freq = policy->cur; + new_freq = pdata->freq_table[idx].frequency; /* if moving to higher frequency, up the voltage beforehand */ - if (pdata->set_voltage && freqs.new > freqs.old) { + if (pdata->set_voltage && new_freq > old_freq) { ret = pdata->set_voltage(idx); if (ret) - goto out; + return ret; } ret = clk_set_rate(armclk, idx); if (ret) - goto out; + return ret; if (cpufreq.asyncclk) { ret = clk_set_rate(cpufreq.asyncclk, cpufreq.asyncrate); if (ret) - goto out; + return ret; } /* if moving to lower freq, lower the voltage after lowering freq */ - if (pdata->set_voltage && freqs.new < freqs.old) + if (pdata->set_voltage && new_freq < old_freq) pdata->set_voltage(idx); -out: - if (ret) - freqs.new = freqs.old; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - return ret; + return 0; } static int davinci_cpu_init(struct cpufreq_policy *policy) @@ -138,16 +108,7 @@ static int davinci_cpu_init(struct cpufreq_policy *policy) return result; } - policy->cur = davinci_getspeed(0); - - result = cpufreq_frequency_table_cpuinfo(policy, freq_table); - if (result) { - pr_err("%s: cpufreq_frequency_table_cpuinfo() failed", - __func__); - return result; - } - - cpufreq_frequency_table_get_attr(freq_table, policy->cpu); + policy->clk = cpufreq.armclk; /* * Time measurement across the target() function yields ~1500-1800us @@ -155,30 +116,17 @@ static int davinci_cpu_init(struct cpufreq_policy *policy) * Setting the latency to 2000 us to accommodate addition of drivers * to pre/post change notification list. 
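davinci_target() above follows the usual DVFS ordering rule: raise the voltage before raising the clock, and lower it only after the clock has gone down, so the core is never clocked faster than its current voltage supports. As a standalone sketch (userspace C with stubbed helpers; all names are illustrative):

#include <stdio.h>

static int set_voltage_for(unsigned int khz) { printf("volt for %u kHz\n", khz); return 0; }
static int set_clock(unsigned int khz)       { printf("clk  to  %u kHz\n", khz); return 0; }

static int dvfs_transition(unsigned int old_khz, unsigned int new_khz)
{
	int ret;

	if (new_khz > old_khz) {		/* scaling up: voltage first */
		ret = set_voltage_for(new_khz);
		if (ret)
			return ret;
	}

	ret = set_clock(new_khz);
	if (ret)
		return ret;

	if (new_khz < old_khz)			/* scaling down: voltage last */
		ret = set_voltage_for(new_khz);

	return ret;
}

int main(void)
{
	return dvfs_transition(500000, 800000) || dvfs_transition(800000, 300000);
}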
*/ - policy->cpuinfo.transition_latency = 2000 * 1000; - return 0; + return cpufreq_generic_init(policy, freq_table, 2000 * 1000); } -static int davinci_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; -} - -static struct freq_attr *davinci_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver davinci_driver = { - .flags = CPUFREQ_STICKY, + .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, .verify = davinci_verify_speed, - .target = davinci_target, - .get = davinci_getspeed, + .target_index = davinci_target, + .get = cpufreq_generic_get, .init = davinci_cpu_init, - .exit = davinci_cpu_exit, .name = "davinci", - .attr = davinci_cpufreq_attr, + .attr = cpufreq_generic_attr, }; static int __init davinci_cpufreq_probe(struct platform_device *pdev) diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c index 26321cdc194..4bebc1b5db4 100644 --- a/drivers/cpufreq/dbx500-cpufreq.c +++ b/drivers/cpufreq/dbx500-cpufreq.c @@ -19,113 +19,33 @@ static struct cpufreq_frequency_table *freq_table; static struct clk *armss_clk; -static struct freq_attr *dbx500_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - -static int dbx500_cpufreq_verify_speed(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, freq_table); -} - static int dbx500_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) + unsigned int index) { - struct cpufreq_freqs freqs; - unsigned int idx; - int ret; - - /* Lookup the next frequency */ - if (cpufreq_frequency_table_target(policy, freq_table, target_freq, - relation, &idx)) - return -EINVAL; - - freqs.old = policy->cur; - freqs.new = freq_table[idx].frequency; - - if (freqs.old == freqs.new) - return 0; - - /* pre-change notification */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - /* update armss clk frequency */ - ret = clk_set_rate(armss_clk, freqs.new * 1000); - - if (ret) { - pr_err("dbx500-cpufreq: Failed to set armss_clk to %d Hz: error %d\n", - freqs.new * 1000, ret); - freqs.new = freqs.old; - } - - /* post change notification */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - return ret; -} - -static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu) -{ - int i = 0; - unsigned long freq = clk_get_rate(armss_clk) / 1000; - - /* The value is rounded to closest frequency in the defined table. */ - while (freq_table[i + 1].frequency != CPUFREQ_TABLE_END) { - if (freq < freq_table[i].frequency + - (freq_table[i + 1].frequency - freq_table[i].frequency) / 2) - return freq_table[i].frequency; - i++; - } - - return freq_table[i].frequency; + return clk_set_rate(armss_clk, freq_table[index].frequency * 1000); } static int dbx500_cpufreq_init(struct cpufreq_policy *policy) { - int res; - - /* get policy fields based on the table */ - res = cpufreq_frequency_table_cpuinfo(policy, freq_table); - if (!res) - cpufreq_frequency_table_get_attr(freq_table, policy->cpu); - else { - pr_err("dbx500-cpufreq: Failed to read policy table\n"); - return res; - } - - policy->min = policy->cpuinfo.min_freq; - policy->max = policy->cpuinfo.max_freq; - policy->cur = dbx500_cpufreq_getspeed(policy->cpu); - policy->governor = CPUFREQ_DEFAULT_GOVERNOR; - - /* - * FIXME : Need to take time measurement across the target() - * function with no/some/all drivers in the notification - * list. 
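After this series, a table-based driver shrinks to a target_index() callback plus the generic helpers, as the davinci and dbx500 conversions show. The minimal shape (a sketch; the foo_ names and table values are placeholders):

#include <linux/cpufreq.h>

static struct cpufreq_frequency_table foo_freq_table[] = {
	{0, 0, 200000},			/* {flags, driver_data, frequency in kHz} */
	{0, 1, 400000},
	{0, 0, CPUFREQ_TABLE_END},
};

static int foo_target(struct cpufreq_policy *policy, unsigned int index)
{
	/* program the hardware to foo_freq_table[index].frequency here */
	return 0;
}

static int foo_cpu_init(struct cpufreq_policy *policy)
{
	/* policy->clk, if set, lets cpufreq_generic_get() report the rate */
	return cpufreq_generic_init(policy, foo_freq_table, 20 * 1000);
}

static struct cpufreq_driver foo_driver = {
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= foo_target,
	.get		= cpufreq_generic_get,
	.init		= foo_cpu_init,
	.name		= "foo",
	.attr		= cpufreq_generic_attr,
};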
- */ - policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */ - - /* policy sharing between dual CPUs */ - cpumask_setall(policy->cpus); - - return 0; + policy->clk = armss_clk; + return cpufreq_generic_init(policy, freq_table, 20 * 1000); } static struct cpufreq_driver dbx500_cpufreq_driver = { - .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS, - .verify = dbx500_cpufreq_verify_speed, - .target = dbx500_cpufreq_target, - .get = dbx500_cpufreq_getspeed, + .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS | + CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = dbx500_cpufreq_target, + .get = cpufreq_generic_get, .init = dbx500_cpufreq_init, .name = "DBX500", - .attr = dbx500_cpufreq_attr, + .attr = cpufreq_generic_attr, }; static int dbx500_cpufreq_probe(struct platform_device *pdev) { - int i = 0; + struct cpufreq_frequency_table *pos; freq_table = dev_get_platdata(&pdev->dev); if (!freq_table) { @@ -140,10 +60,8 @@ static int dbx500_cpufreq_probe(struct platform_device *pdev) } pr_info("dbx500-cpufreq: Available frequencies:\n"); - while (freq_table[i].frequency != CPUFREQ_TABLE_END) { - pr_info(" %d Mhz\n", freq_table[i].frequency/1000); - i++; - } + cpufreq_for_each_entry(pos, freq_table) + pr_info(" %d Mhz\n", pos->frequency / 1000); return cpufreq_register_driver(&dbx500_cpufreq_driver); } diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c index 09f64cc8301..a0d2a423cea 100644 --- a/drivers/cpufreq/e_powersaver.c +++ b/drivers/cpufreq/e_powersaver.c @@ -107,15 +107,9 @@ static int eps_set_state(struct eps_cpu_data *centaur, struct cpufreq_policy *policy, u32 dest_state) { - struct cpufreq_freqs freqs; u32 lo, hi; - int err = 0; int i; - freqs.old = eps_get(policy->cpu); - freqs.new = centaur->fsb * ((dest_state >> 8) & 0xff); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - /* Wait while CPU is busy */ rdmsr(MSR_IA32_PERF_STATUS, lo, hi); i = 0; @@ -124,8 +118,7 @@ static int eps_set_state(struct eps_cpu_data *centaur, rdmsr(MSR_IA32_PERF_STATUS, lo, hi); i++; if (unlikely(i > 64)) { - err = -ENODEV; - goto postchange; + return -ENODEV; } } /* Set new multiplier and voltage */ @@ -137,16 +130,10 @@ static int eps_set_state(struct eps_cpu_data *centaur, rdmsr(MSR_IA32_PERF_STATUS, lo, hi); i++; if (unlikely(i > 64)) { - err = -ENODEV; - goto postchange; + return -ENODEV; } } while (lo & ((1 << 16) | (1 << 17))); - /* Return current frequency */ -postchange: - rdmsr(MSR_IA32_PERF_STATUS, lo, hi); - freqs.new = centaur->fsb * ((lo >> 8) & 0xff); - #ifdef DEBUG { u8 current_multiplier, current_voltage; @@ -161,19 +148,12 @@ postchange: current_multiplier); } #endif - if (err) - freqs.new = freqs.old; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - return err; + return 0; } -static int eps_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) +static int eps_target(struct cpufreq_policy *policy, unsigned int index) { struct eps_cpu_data *centaur; - unsigned int newstate = 0; unsigned int cpu = policy->cpu; unsigned int dest_state; int ret; @@ -182,28 +162,14 @@ static int eps_target(struct cpufreq_policy *policy, return -ENODEV; centaur = eps_cpu[cpu]; - if (unlikely(cpufreq_frequency_table_target(policy, - &eps_cpu[cpu]->freq_table[0], - target_freq, - relation, - &newstate))) { - return -EINVAL; - } - /* Make frequency transition */ - dest_state = centaur->freq_table[newstate].driver_data & 0xffff; + dest_state = 
centaur->freq_table[index].driver_data & 0xffff; ret = eps_set_state(centaur, policy, dest_state); if (ret) printk(KERN_ERR "eps: Timeout!\n"); return ret; } -static int eps_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, - &eps_cpu[policy->cpu]->freq_table[0]); -} - static int eps_cpu_init(struct cpufreq_policy *policy) { unsigned int i; @@ -401,15 +367,13 @@ static int eps_cpu_init(struct cpufreq_policy *policy) } policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */ - policy->cur = fsb * current_multiplier; - ret = cpufreq_frequency_table_cpuinfo(policy, ¢aur->freq_table[0]); + ret = cpufreq_table_validate_and_show(policy, ¢aur->freq_table[0]); if (ret) { kfree(centaur); return ret; } - cpufreq_frequency_table_get_attr(¢aur->freq_table[0], policy->cpu); return 0; } @@ -418,25 +382,19 @@ static int eps_cpu_exit(struct cpufreq_policy *policy) unsigned int cpu = policy->cpu; /* Bye */ - cpufreq_frequency_table_put_attr(policy->cpu); kfree(eps_cpu[cpu]); eps_cpu[cpu] = NULL; return 0; } -static struct freq_attr *eps_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver eps_driver = { - .verify = eps_verify, - .target = eps_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = eps_target, .init = eps_cpu_init, .exit = eps_cpu_exit, .get = eps_get, .name = "e_powersaver", - .attr = eps_attr, + .attr = cpufreq_generic_attr, }; diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c index 823a400d98f..1c06e786c9b 100644 --- a/drivers/cpufreq/elanfreq.c +++ b/drivers/cpufreq/elanfreq.c @@ -56,15 +56,15 @@ static struct s_elan_multiplier elan_multiplier[] = { }; static struct cpufreq_frequency_table elanfreq_table[] = { - {0, 1000}, - {1, 2000}, - {2, 4000}, - {3, 8000}, - {4, 16000}, - {5, 33000}, - {6, 66000}, - {7, 99000}, - {0, CPUFREQ_TABLE_END}, + {0, 0, 1000}, + {0, 1, 2000}, + {0, 2, 4000}, + {0, 3, 8000}, + {0, 4, 16000}, + {0, 5, 33000}, + {0, 6, 66000}, + {0, 7, 99000}, + {0, 0, CPUFREQ_TABLE_END}, }; @@ -105,32 +105,9 @@ static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu) } -/** - * elanfreq_set_cpu_frequency: Change the CPU core frequency - * @cpu: cpu number - * @freq: frequency in kHz - * - * This function takes a frequency value and changes the CPU frequency - * according to this. Note that the frequency has to be checked by - * elanfreq_validatespeed() for correctness! - * - * There is no return value. - */ - -static void elanfreq_set_cpu_state(struct cpufreq_policy *policy, - unsigned int state) +static int elanfreq_target(struct cpufreq_policy *policy, + unsigned int state) { - struct cpufreq_freqs freqs; - - freqs.old = elanfreq_get_cpu_frequency(0); - freqs.new = elan_multiplier[state].clock; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - - printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n", - elan_multiplier[state].clock); - - /* * Access to the Elan's internal registers is indexed via * 0x22: Chip Setup & Control Register Index Register (CSCI) @@ -161,39 +138,8 @@ static void elanfreq_set_cpu_state(struct cpufreq_policy *policy, udelay(10000); local_irq_enable(); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); -}; - - -/** - * elanfreq_validatespeed: test if frequency range is valid - * @policy: the policy to validate - * - * This function checks if a given frequency range in kHz is valid - * for the hardware supported by the driver. 
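The three-field initializers above ({0, 0x01, 6000} and friends) reflect the new layout of struct cpufreq_frequency_table, which gained a flags word for entries such as CPUFREQ_BOOST_FREQ. For reference, the layout assumed by this series:

struct cpufreq_frequency_table {
	unsigned int	flags;		/* e.g. CPUFREQ_BOOST_FREQ */
	unsigned int	driver_data;	/* driver-private value */
	unsigned int	frequency;	/* kHz, or CPUFREQ_ENTRY_INVALID /
					 * CPUFREQ_TABLE_END */
};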
- */ - -static int elanfreq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, &elanfreq_table[0]); -} - -static int elanfreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int newstate = 0; - - if (cpufreq_frequency_table_target(policy, &elanfreq_table[0], - target_freq, relation, &newstate)) - return -EINVAL; - - elanfreq_set_cpu_state(policy, newstate); - return 0; } - - /* * Module init and exit code */ @@ -201,8 +147,7 @@ static int elanfreq_target(struct cpufreq_policy *policy, static int elanfreq_cpu_init(struct cpufreq_policy *policy) { struct cpuinfo_x86 *c = &cpu_data(0); - unsigned int i; - int result; + struct cpufreq_frequency_table *pos; /* capability check */ if ((c->x86_vendor != X86_VENDOR_AMD) || @@ -214,28 +159,14 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy) max_freq = elanfreq_get_cpu_frequency(0); /* table init */ - for (i = 0; (elanfreq_table[i].frequency != CPUFREQ_TABLE_END); i++) { - if (elanfreq_table[i].frequency > max_freq) - elanfreq_table[i].frequency = CPUFREQ_ENTRY_INVALID; - } + cpufreq_for_each_entry(pos, elanfreq_table) + if (pos->frequency > max_freq) + pos->frequency = CPUFREQ_ENTRY_INVALID; /* cpuinfo and default policy values */ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; - policy->cur = elanfreq_get_cpu_frequency(0); - - result = cpufreq_frequency_table_cpuinfo(policy, elanfreq_table); - if (result) - return result; - cpufreq_frequency_table_get_attr(elanfreq_table, policy->cpu); - return 0; -} - - -static int elanfreq_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; + return cpufreq_table_validate_and_show(policy, elanfreq_table); } @@ -261,20 +192,13 @@ __setup("elanfreq=", elanfreq_setup); #endif -static struct freq_attr *elanfreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - - static struct cpufreq_driver elanfreq_driver = { .get = elanfreq_get_cpu_frequency, - .verify = elanfreq_verify, - .target = elanfreq_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = elanfreq_target, .init = elanfreq_cpu_init, - .exit = elanfreq_cpu_exit, .name = "elanfreq", - .attr = elanfreq_attr, + .attr = cpufreq_generic_attr, }; static const struct x86_cpu_id elan_id[] = { diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c index 0fac34439e3..1e0ec57bf6e 100644 --- a/drivers/cpufreq/exynos-cpufreq.c +++ b/drivers/cpufreq/exynos-cpufreq.c @@ -16,46 +16,28 @@ #include <linux/slab.h> #include <linux/regulator/consumer.h> #include <linux/cpufreq.h> -#include <linux/suspend.h> - -#include <plat/cpu.h> +#include <linux/platform_device.h> +#include <linux/of.h> #include "exynos-cpufreq.h" static struct exynos_dvfs_info *exynos_info; - static struct regulator *arm_regulator; -static struct cpufreq_freqs freqs; - static unsigned int locking_frequency; -static bool frequency_locked; -static DEFINE_MUTEX(cpufreq_lock); - -static int exynos_verify_speed(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, - exynos_info->freq_table); -} - -static unsigned int exynos_getspeed(unsigned int cpu) -{ - return clk_get_rate(exynos_info->cpu_clk) / 1000; -} static int exynos_cpufreq_get_index(unsigned int freq) { struct cpufreq_frequency_table *freq_table = exynos_info->freq_table; - int index; + struct cpufreq_frequency_table *pos; - for (index = 0; - freq_table[index].frequency != CPUFREQ_TABLE_END; 
index++) - if (freq_table[index].frequency == freq) + cpufreq_for_each_entry(pos, freq_table) + if (pos->frequency == freq) break; - if (freq_table[index].frequency == CPUFREQ_TABLE_END) + if (pos->frequency == CPUFREQ_TABLE_END) return -EINVAL; - return index; + return pos - freq_table; } static int exynos_cpufreq_scale(unsigned int target_freq) @@ -65,21 +47,19 @@ static int exynos_cpufreq_scale(unsigned int target_freq) struct cpufreq_policy *policy = cpufreq_cpu_get(0); unsigned int arm_volt, safe_arm_volt = 0; unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz; + struct device *dev = exynos_info->dev; + unsigned int old_freq; int index, old_index; int ret = 0; - freqs.old = policy->cur; - freqs.new = target_freq; - - if (freqs.new == freqs.old) - goto out; + old_freq = policy->cur; /* * The policy max have been changed so that we cannot get proper * old_index with cpufreq_frequency_table_target(). Thus, ignore - * policy and get the index from the raw freqeuncy table. + * policy and get the index from the raw frequency table. */ - old_index = exynos_cpufreq_get_index(freqs.old); + old_index = exynos_cpufreq_get_index(old_freq); if (old_index < 0) { ret = old_index; goto out; @@ -104,17 +84,14 @@ static int exynos_cpufreq_scale(unsigned int target_freq) } arm_volt = volt_table[index]; - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - /* When the new frequency is higher than current frequency */ - if ((freqs.new > freqs.old) && !safe_arm_volt) { + if ((target_freq > old_freq) && !safe_arm_volt) { /* Firstly, voltage up to increase frequency */ ret = regulator_set_voltage(arm_regulator, arm_volt, arm_volt); if (ret) { - pr_err("%s: failed to set cpu voltage to %d\n", - __func__, arm_volt); - freqs.new = freqs.old; - goto post_notify; + dev_err(dev, "failed to set cpu voltage to %d\n", + arm_volt); + return ret; } } @@ -122,170 +99,62 @@ static int exynos_cpufreq_scale(unsigned int target_freq) ret = regulator_set_voltage(arm_regulator, safe_arm_volt, safe_arm_volt); if (ret) { - pr_err("%s: failed to set cpu voltage to %d\n", - __func__, safe_arm_volt); - freqs.new = freqs.old; - goto post_notify; + dev_err(dev, "failed to set cpu voltage to %d\n", + safe_arm_volt); + return ret; } } exynos_info->set_freq(old_index, index); -post_notify: - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - if (ret) - goto out; - /* When the new frequency is lower than current frequency */ - if ((freqs.new < freqs.old) || - ((freqs.new > freqs.old) && safe_arm_volt)) { + if ((target_freq < old_freq) || + ((target_freq > old_freq) && safe_arm_volt)) { /* down the voltage after frequency change */ - regulator_set_voltage(arm_regulator, arm_volt, + ret = regulator_set_voltage(arm_regulator, arm_volt, arm_volt); if (ret) { - pr_err("%s: failed to set cpu voltage to %d\n", - __func__, arm_volt); + dev_err(dev, "failed to set cpu voltage to %d\n", + arm_volt); goto out; } } out: - cpufreq_cpu_put(policy); return ret; } -static int exynos_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - struct cpufreq_frequency_table *freq_table = exynos_info->freq_table; - unsigned int index; - unsigned int new_freq; - int ret = 0; - - mutex_lock(&cpufreq_lock); - - if (frequency_locked) - goto out; - - if (cpufreq_frequency_table_target(policy, freq_table, - target_freq, relation, &index)) { - ret = -EINVAL; - goto out; - } - - new_freq = freq_table[index].frequency; - - ret = exynos_cpufreq_scale(new_freq); - -out: - 
mutex_unlock(&cpufreq_lock); - - return ret; -} - -#ifdef CONFIG_PM -static int exynos_cpufreq_suspend(struct cpufreq_policy *policy) +static int exynos_target(struct cpufreq_policy *policy, unsigned int index) { - return 0; + return exynos_cpufreq_scale(exynos_info->freq_table[index].frequency); } -static int exynos_cpufreq_resume(struct cpufreq_policy *policy) -{ - return 0; -} -#endif - -/** - * exynos_cpufreq_pm_notifier - block CPUFREQ's activities in suspend-resume - * context - * @notifier - * @pm_event - * @v - * - * While frequency_locked == true, target() ignores every frequency but - * locking_frequency. The locking_frequency value is the initial frequency, - * which is set by the bootloader. In order to eliminate possible - * inconsistency in clock values, we save and restore frequencies during - * suspend and resume and block CPUFREQ activities. Note that the standard - * suspend/resume cannot be used as they are too deep (syscore_ops) for - * regulator actions. - */ -static int exynos_cpufreq_pm_notifier(struct notifier_block *notifier, - unsigned long pm_event, void *v) -{ - int ret; - - switch (pm_event) { - case PM_SUSPEND_PREPARE: - mutex_lock(&cpufreq_lock); - frequency_locked = true; - mutex_unlock(&cpufreq_lock); - - ret = exynos_cpufreq_scale(locking_frequency); - if (ret < 0) - return NOTIFY_BAD; - - break; - - case PM_POST_SUSPEND: - mutex_lock(&cpufreq_lock); - frequency_locked = false; - mutex_unlock(&cpufreq_lock); - break; - } - - return NOTIFY_OK; -} - -static struct notifier_block exynos_cpufreq_nb = { - .notifier_call = exynos_cpufreq_pm_notifier, -}; - static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) { - policy->cur = policy->min = policy->max = exynos_getspeed(policy->cpu); - - cpufreq_frequency_table_get_attr(exynos_info->freq_table, policy->cpu); - - /* set the transition latency value */ - policy->cpuinfo.transition_latency = 100000; - - cpumask_setall(policy->cpus); - - return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table); -} - -static int exynos_cpufreq_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; + policy->clk = exynos_info->cpu_clk; + policy->suspend_freq = locking_frequency; + return cpufreq_generic_init(policy, exynos_info->freq_table, 100000); } -static struct freq_attr *exynos_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver exynos_driver = { - .flags = CPUFREQ_STICKY, - .verify = exynos_verify_speed, - .target = exynos_target, - .get = exynos_getspeed, + .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = exynos_target, + .get = cpufreq_generic_get, .init = exynos_cpufreq_cpu_init, - .exit = exynos_cpufreq_cpu_exit, .name = "exynos_cpufreq", - .attr = exynos_cpufreq_attr, + .attr = cpufreq_generic_attr, +#ifdef CONFIG_ARM_EXYNOS_CPU_FREQ_BOOST_SW + .boost_supported = true, +#endif #ifdef CONFIG_PM - .suspend = exynos_cpufreq_suspend, - .resume = exynos_cpufreq_resume, + .suspend = cpufreq_generic_suspend, #endif }; -static int __init exynos_cpufreq_init(void) +static int exynos_cpufreq_probe(struct platform_device *pdev) { int ret = -EINVAL; @@ -293,45 +162,57 @@ static int __init exynos_cpufreq_init(void) if (!exynos_info) return -ENOMEM; - if (soc_is_exynos4210()) + exynos_info->dev = &pdev->dev; + + if (of_machine_is_compatible("samsung,exynos4210")) { + exynos_info->type = EXYNOS_SOC_4210; ret = 
exynos4210_cpufreq_init(exynos_info); - else if (soc_is_exynos4212() || soc_is_exynos4412()) + } else if (of_machine_is_compatible("samsung,exynos4212")) { + exynos_info->type = EXYNOS_SOC_4212; ret = exynos4x12_cpufreq_init(exynos_info); - else if (soc_is_exynos5250()) + } else if (of_machine_is_compatible("samsung,exynos4412")) { + exynos_info->type = EXYNOS_SOC_4412; + ret = exynos4x12_cpufreq_init(exynos_info); + } else if (of_machine_is_compatible("samsung,exynos5250")) { + exynos_info->type = EXYNOS_SOC_5250; ret = exynos5250_cpufreq_init(exynos_info); - else - return 0; + } else { + pr_err("%s: Unknown SoC type\n", __func__); + return -ENODEV; + } if (ret) goto err_vdd_arm; if (exynos_info->set_freq == NULL) { - pr_err("%s: No set_freq function (ERR)\n", __func__); + dev_err(&pdev->dev, "No set_freq function (ERR)\n"); goto err_vdd_arm; } arm_regulator = regulator_get(NULL, "vdd_arm"); if (IS_ERR(arm_regulator)) { - pr_err("%s: failed to get resource vdd_arm\n", __func__); + dev_err(&pdev->dev, "failed to get resource vdd_arm\n"); goto err_vdd_arm; } - locking_frequency = exynos_getspeed(0); + /* Done here as we want to capture boot frequency */ + locking_frequency = clk_get_rate(exynos_info->cpu_clk) / 1000; - register_pm_notifier(&exynos_cpufreq_nb); - - if (cpufreq_register_driver(&exynos_driver)) { - pr_err("%s: failed to register cpufreq driver\n", __func__); - goto err_cpufreq; - } - - return 0; -err_cpufreq: - unregister_pm_notifier(&exynos_cpufreq_nb); + if (!cpufreq_register_driver(&exynos_driver)) + return 0; + dev_err(&pdev->dev, "failed to register cpufreq driver\n"); regulator_put(arm_regulator); err_vdd_arm: kfree(exynos_info); return -EINVAL; } -late_initcall(exynos_cpufreq_init); + +static struct platform_driver exynos_cpufreq_platdrv = { + .driver = { + .name = "exynos-cpufreq", + .owner = THIS_MODULE, + }, + .probe = exynos_cpufreq_probe, +}; +module_platform_driver(exynos_cpufreq_platdrv); diff --git a/drivers/cpufreq/exynos-cpufreq.h b/drivers/cpufreq/exynos-cpufreq.h index 7f25cee8cec..9f2062a7cc0 100644 --- a/drivers/cpufreq/exynos-cpufreq.h +++ b/drivers/cpufreq/exynos-cpufreq.h @@ -17,6 +17,13 @@ enum cpufreq_level_index { L20, }; +enum exynos_soc_type { + EXYNOS_SOC_4210, + EXYNOS_SOC_4212, + EXYNOS_SOC_4412, + EXYNOS_SOC_5250, +}; + #define APLL_FREQ(f, a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, m, p, s) \ { \ .freq = (f) * 1000, \ @@ -34,6 +41,8 @@ struct apll_freq { }; struct exynos_dvfs_info { + enum exynos_soc_type type; + struct device *dev; unsigned long mpll_freq_khz; unsigned int pll_safe_idx; struct clk *cpu_clk; @@ -41,6 +50,7 @@ struct exynos_dvfs_info { struct cpufreq_frequency_table *freq_table; void (*set_freq)(unsigned int, unsigned int); bool (*need_apll_change)(unsigned int, unsigned int); + void __iomem *cmu_regs; }; #ifdef CONFIG_ARM_EXYNOS4210_CPUFREQ @@ -67,3 +77,22 @@ static inline int exynos5250_cpufreq_init(struct exynos_dvfs_info *info) return -EOPNOTSUPP; } #endif + +#define EXYNOS4_CLKSRC_CPU 0x14200 +#define EXYNOS4_CLKMUX_STATCPU 0x14400 + +#define EXYNOS4_CLKDIV_CPU 0x14500 +#define EXYNOS4_CLKDIV_CPU1 0x14504 +#define EXYNOS4_CLKDIV_STATCPU 0x14600 +#define EXYNOS4_CLKDIV_STATCPU1 0x14604 + +#define EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT (16) +#define EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK (0x7 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT) + +#define EXYNOS5_APLL_LOCK 0x00000 +#define EXYNOS5_APLL_CON0 0x00100 +#define EXYNOS5_CLKMUX_STATCPU 0x00400 +#define EXYNOS5_CLKDIV_CPU0 0x00500 +#define EXYNOS5_CLKDIV_CPU1 0x00504 +#define 
EXYNOS5_CLKDIV_STATCPU0 0x00600 +#define EXYNOS5_CLKDIV_STATCPU1 0x00604 diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c index add7fbec4fc..61a54310a1b 100644 --- a/drivers/cpufreq/exynos4210-cpufreq.c +++ b/drivers/cpufreq/exynos4210-cpufreq.c @@ -16,8 +16,8 @@ #include <linux/io.h> #include <linux/slab.h> #include <linux/cpufreq.h> - -#include <mach/regs-clock.h> +#include <linux/of.h> +#include <linux/of_address.h> #include "exynos-cpufreq.h" @@ -25,18 +25,19 @@ static struct clk *cpu_clk; static struct clk *moutcore; static struct clk *mout_mpll; static struct clk *mout_apll; +static struct exynos_dvfs_info *cpufreq; static unsigned int exynos4210_volt_table[] = { 1250000, 1150000, 1050000, 975000, 950000, }; static struct cpufreq_frequency_table exynos4210_freq_table[] = { - {L0, 1200 * 1000}, - {L1, 1000 * 1000}, - {L2, 800 * 1000}, - {L3, 500 * 1000}, - {L4, 200 * 1000}, - {0, CPUFREQ_TABLE_END}, + {0, L0, 1200 * 1000}, + {0, L1, 1000 * 1000}, + {0, L2, 800 * 1000}, + {0, L3, 500 * 1000}, + {0, L4, 200 * 1000}, + {0, 0, CPUFREQ_TABLE_END}, }; static struct apll_freq apll_freq_4210[] = { @@ -62,113 +63,85 @@ static void exynos4210_set_clkdiv(unsigned int div_index) tmp = apll_freq_4210[div_index].clk_div_cpu0; - __raw_writel(tmp, EXYNOS4_CLKDIV_CPU); + __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU); do { - tmp = __raw_readl(EXYNOS4_CLKDIV_STATCPU); + tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU); } while (tmp & 0x1111111); /* Change Divider - CPU1 */ tmp = apll_freq_4210[div_index].clk_div_cpu1; - __raw_writel(tmp, EXYNOS4_CLKDIV_CPU1); + __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU1); do { - tmp = __raw_readl(EXYNOS4_CLKDIV_STATCPU1); + tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU1); } while (tmp & 0x11); } static void exynos4210_set_apll(unsigned int index) { - unsigned int tmp; + unsigned int tmp, freq = apll_freq_4210[index].freq; - /* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */ + /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */ clk_set_parent(moutcore, mout_mpll); do { - tmp = (__raw_readl(EXYNOS4_CLKMUX_STATCPU) + tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU) >> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT); tmp &= 0x7; } while (tmp != 0x2); - /* 2. Set APLL Lock time */ - __raw_writel(EXYNOS4_APLL_LOCKTIME, EXYNOS4_APLL_LOCK); - - /* 3. Change PLL PMS values */ - tmp = __raw_readl(EXYNOS4_APLL_CON0); - tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0)); - tmp |= apll_freq_4210[index].mps; - __raw_writel(tmp, EXYNOS4_APLL_CON0); - - /* 4. wait_lock_time */ - do { - tmp = __raw_readl(EXYNOS4_APLL_CON0); - } while (!(tmp & (0x1 << EXYNOS4_APLLCON0_LOCKED_SHIFT))); + clk_set_rate(mout_apll, freq * 1000); - /* 5. MUX_CORE_SEL = APLL */ + /* MUX_CORE_SEL = APLL */ clk_set_parent(moutcore, mout_apll); do { - tmp = __raw_readl(EXYNOS4_CLKMUX_STATCPU); + tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU); tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK; } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)); } -static bool exynos4210_pms_change(unsigned int old_index, unsigned int new_index) -{ - unsigned int old_pm = apll_freq_4210[old_index].mps >> 8; - unsigned int new_pm = apll_freq_4210[new_index].mps >> 8; - - return (old_pm == new_pm) ? 0 : 1; -} - static void exynos4210_set_frequency(unsigned int old_index, unsigned int new_index) { - unsigned int tmp; - if (old_index > new_index) { - if (!exynos4210_pms_change(old_index, new_index)) { - /* 1. 
Change the system clock divider values */ - exynos4210_set_clkdiv(new_index); - - /* 2. Change just s value in apll m,p,s value */ - tmp = __raw_readl(EXYNOS4_APLL_CON0); - tmp &= ~(0x7 << 0); - tmp |= apll_freq_4210[new_index].mps & 0x7; - __raw_writel(tmp, EXYNOS4_APLL_CON0); - } else { - /* Clock Configuration Procedure */ - /* 1. Change the system clock divider values */ - exynos4210_set_clkdiv(new_index); - /* 2. Change the apll m,p,s value */ - exynos4210_set_apll(new_index); - } + exynos4210_set_clkdiv(new_index); + exynos4210_set_apll(new_index); } else if (old_index < new_index) { - if (!exynos4210_pms_change(old_index, new_index)) { - /* 1. Change just s value in apll m,p,s value */ - tmp = __raw_readl(EXYNOS4_APLL_CON0); - tmp &= ~(0x7 << 0); - tmp |= apll_freq_4210[new_index].mps & 0x7; - __raw_writel(tmp, EXYNOS4_APLL_CON0); - - /* 2. Change the system clock divider values */ - exynos4210_set_clkdiv(new_index); - } else { - /* Clock Configuration Procedure */ - /* 1. Change the apll m,p,s value */ - exynos4210_set_apll(new_index); - /* 2. Change the system clock divider values */ - exynos4210_set_clkdiv(new_index); - } + exynos4210_set_apll(new_index); + exynos4210_set_clkdiv(new_index); } } int exynos4210_cpufreq_init(struct exynos_dvfs_info *info) { + struct device_node *np; unsigned long rate; + /* + * HACK: This is a temporary workaround to get access to clock + * controller registers directly and remove static mappings and + * dependencies on platform headers. It is necessary to enable + * Exynos multi-platform support and will be removed together with + * this whole driver as soon as Exynos gets migrated to use + * cpufreq-cpu0 driver. + */ + np = of_find_compatible_node(NULL, NULL, "samsung,exynos4210-clock"); + if (!np) { + pr_err("%s: failed to find clock controller DT node\n", + __func__); + return -ENODEV; + } + + info->cmu_regs = of_iomap(np, 0); + if (!info->cmu_regs) { + pr_err("%s: failed to map CMU registers\n", __func__); + return -EFAULT; + } + cpu_clk = clk_get(NULL, "armclk"); if (IS_ERR(cpu_clk)) return PTR_ERR(cpu_clk); @@ -194,7 +167,8 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info) info->volt_table = exynos4210_volt_table; info->freq_table = exynos4210_freq_table; info->set_freq = exynos4210_set_frequency; - info->need_apll_change = exynos4210_pms_change; + + cpufreq = info; return 0; @@ -208,4 +182,3 @@ err_moutcore: pr_debug("%s: failed initialization\n", __func__); return -EINVAL; } -EXPORT_SYMBOL(exynos4210_cpufreq_init); diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c index 08b7477b0aa..351a2074cfe 100644 --- a/drivers/cpufreq/exynos4x12-cpufreq.c +++ b/drivers/cpufreq/exynos4x12-cpufreq.c @@ -16,8 +16,8 @@ #include <linux/io.h> #include <linux/slab.h> #include <linux/cpufreq.h> - -#include <mach/regs-clock.h> +#include <linux/of.h> +#include <linux/of_address.h> #include "exynos-cpufreq.h" @@ -25,6 +25,7 @@ static struct clk *cpu_clk; static struct clk *moutcore; static struct clk *mout_mpll; static struct clk *mout_apll; +static struct exynos_dvfs_info *cpufreq; static unsigned int exynos4x12_volt_table[] = { 1350000, 1287500, 1250000, 1187500, 1137500, 1087500, 1037500, @@ -32,21 +33,21 @@ static unsigned int exynos4x12_volt_table[] = { }; static struct cpufreq_frequency_table exynos4x12_freq_table[] = { - {L0, CPUFREQ_ENTRY_INVALID}, - {L1, 1400 * 1000}, - {L2, 1300 * 1000}, - {L3, 1200 * 1000}, - {L4, 1100 * 1000}, - {L5, 1000 * 1000}, - {L6, 900 * 1000}, - {L7, 800 * 1000}, - {L8, 700 * 
1000}, - {L9, 600 * 1000}, - {L10, 500 * 1000}, - {L11, 400 * 1000}, - {L12, 300 * 1000}, - {L13, 200 * 1000}, - {0, CPUFREQ_TABLE_END}, + {CPUFREQ_BOOST_FREQ, L0, 1500 * 1000}, + {0, L1, 1400 * 1000}, + {0, L2, 1300 * 1000}, + {0, L3, 1200 * 1000}, + {0, L4, 1100 * 1000}, + {0, L5, 1000 * 1000}, + {0, L6, 900 * 1000}, + {0, L7, 800 * 1000}, + {0, L8, 700 * 1000}, + {0, L9, 600 * 1000}, + {0, L10, 500 * 1000}, + {0, L11, 400 * 1000}, + {0, L12, 300 * 1000}, + {0, L13, 200 * 1000}, + {0, 0, CPUFREQ_TABLE_END}, }; static struct apll_freq *apll_freq_4x12; @@ -102,124 +103,92 @@ static struct apll_freq apll_freq_4412[] = { static void exynos4x12_set_clkdiv(unsigned int div_index) { unsigned int tmp; - unsigned int stat_cpu1; /* Change Divider - CPU0 */ tmp = apll_freq_4x12[div_index].clk_div_cpu0; - __raw_writel(tmp, EXYNOS4_CLKDIV_CPU); + __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU); - while (__raw_readl(EXYNOS4_CLKDIV_STATCPU) & 0x11111111) + while (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU) + & 0x11111111) cpu_relax(); /* Change Divider - CPU1 */ tmp = apll_freq_4x12[div_index].clk_div_cpu1; - __raw_writel(tmp, EXYNOS4_CLKDIV_CPU1); - if (soc_is_exynos4212()) - stat_cpu1 = 0x11; - else - stat_cpu1 = 0x111; + __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU1); - while (__raw_readl(EXYNOS4_CLKDIV_STATCPU1) & stat_cpu1) + do { cpu_relax(); + tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU1); + } while (tmp != 0x0); } static void exynos4x12_set_apll(unsigned int index) { - unsigned int tmp, pdiv; + unsigned int tmp, freq = apll_freq_4x12[index].freq; - /* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */ + /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */ clk_set_parent(moutcore, mout_mpll); do { cpu_relax(); - tmp = (__raw_readl(EXYNOS4_CLKMUX_STATCPU) + tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU) >> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT); tmp &= 0x7; } while (tmp != 0x2); - /* 2. Set APLL Lock time */ - pdiv = ((apll_freq_4x12[index].mps >> 8) & 0x3f); + clk_set_rate(mout_apll, freq * 1000); - __raw_writel((pdiv * 250), EXYNOS4_APLL_LOCK); - - /* 3. Change PLL PMS values */ - tmp = __raw_readl(EXYNOS4_APLL_CON0); - tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0)); - tmp |= apll_freq_4x12[index].mps; - __raw_writel(tmp, EXYNOS4_APLL_CON0); - - /* 4. wait_lock_time */ - do { - cpu_relax(); - tmp = __raw_readl(EXYNOS4_APLL_CON0); - } while (!(tmp & (0x1 << EXYNOS4_APLLCON0_LOCKED_SHIFT))); - - /* 5. MUX_CORE_SEL = APLL */ + /* MUX_CORE_SEL = APLL */ clk_set_parent(moutcore, mout_apll); do { cpu_relax(); - tmp = __raw_readl(EXYNOS4_CLKMUX_STATCPU); + tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU); tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK; } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)); } -static bool exynos4x12_pms_change(unsigned int old_index, unsigned int new_index) -{ - unsigned int old_pm = apll_freq_4x12[old_index].mps >> 8; - unsigned int new_pm = apll_freq_4x12[new_index].mps >> 8; - - return (old_pm == new_pm) ? 0 : 1; -} - static void exynos4x12_set_frequency(unsigned int old_index, unsigned int new_index) { - unsigned int tmp; - if (old_index > new_index) { - if (!exynos4x12_pms_change(old_index, new_index)) { - /* 1. Change the system clock divider values */ - exynos4x12_set_clkdiv(new_index); - /* 2. 
Change just s value in apll m,p,s value */ - tmp = __raw_readl(EXYNOS4_APLL_CON0); - tmp &= ~(0x7 << 0); - tmp |= apll_freq_4x12[new_index].mps & 0x7; - __raw_writel(tmp, EXYNOS4_APLL_CON0); - - } else { - /* Clock Configuration Procedure */ - /* 1. Change the system clock divider values */ - exynos4x12_set_clkdiv(new_index); - /* 2. Change the apll m,p,s value */ - exynos4x12_set_apll(new_index); - } + exynos4x12_set_clkdiv(new_index); + exynos4x12_set_apll(new_index); } else if (old_index < new_index) { - if (!exynos4x12_pms_change(old_index, new_index)) { - /* 1. Change just s value in apll m,p,s value */ - tmp = __raw_readl(EXYNOS4_APLL_CON0); - tmp &= ~(0x7 << 0); - tmp |= apll_freq_4x12[new_index].mps & 0x7; - __raw_writel(tmp, EXYNOS4_APLL_CON0); - /* 2. Change the system clock divider values */ - exynos4x12_set_clkdiv(new_index); - } else { - /* Clock Configuration Procedure */ - /* 1. Change the apll m,p,s value */ - exynos4x12_set_apll(new_index); - /* 2. Change the system clock divider values */ - exynos4x12_set_clkdiv(new_index); - } + exynos4x12_set_apll(new_index); + exynos4x12_set_clkdiv(new_index); } } int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info) { + struct device_node *np; unsigned long rate; + /* + * HACK: This is a temporary workaround to get access to clock + * controller registers directly and remove static mappings and + * dependencies on platform headers. It is necessary to enable + * Exynos multi-platform support and will be removed together with + * this whole driver as soon as Exynos gets migrated to use + * cpufreq-cpu0 driver. + */ + np = of_find_compatible_node(NULL, NULL, "samsung,exynos4412-clock"); + if (!np) { + pr_err("%s: failed to find clock controller DT node\n", + __func__); + return -ENODEV; + } + + info->cmu_regs = of_iomap(np, 0); + if (!info->cmu_regs) { + pr_err("%s: failed to map CMU registers\n", __func__); + return -EFAULT; + } + cpu_clk = clk_get(NULL, "armclk"); if (IS_ERR(cpu_clk)) return PTR_ERR(cpu_clk); @@ -238,7 +207,7 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info) if (IS_ERR(mout_apll)) goto err_mout_apll; - if (soc_is_exynos4212()) + if (info->type == EXYNOS_SOC_4212) apll_freq_4x12 = apll_freq_4212; else apll_freq_4x12 = apll_freq_4412; @@ -250,7 +219,8 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info) info->volt_table = exynos4x12_volt_table; info->freq_table = exynos4x12_freq_table; info->set_freq = exynos4x12_set_frequency; - info->need_apll_change = exynos4x12_pms_change; + + cpufreq = info; return 0; @@ -264,4 +234,3 @@ err_moutcore: pr_debug("%s: failed initialization\n", __func__); return -EINVAL; } -EXPORT_SYMBOL(exynos4x12_cpufreq_init); diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c index 9fae466d774..c91ce69dc63 100644 --- a/drivers/cpufreq/exynos5250-cpufreq.c +++ b/drivers/cpufreq/exynos5250-cpufreq.c @@ -16,9 +16,8 @@ #include <linux/io.h> #include <linux/slab.h> #include <linux/cpufreq.h> - -#include <mach/map.h> -#include <mach/regs-clock.h> +#include <linux/of.h> +#include <linux/of_address.h> #include "exynos-cpufreq.h" @@ -26,6 +25,7 @@ static struct clk *cpu_clk; static struct clk *moutcore; static struct clk *mout_mpll; static struct clk *mout_apll; +static struct exynos_dvfs_info *cpufreq; static unsigned int exynos5250_volt_table[] = { 1300000, 1250000, 1225000, 1200000, 1150000, @@ -35,23 +35,23 @@ static unsigned int exynos5250_volt_table[] = { }; static struct cpufreq_frequency_table exynos5250_freq_table[] = { - {L0, 1700 * 
1000}, - {L1, 1600 * 1000}, - {L2, 1500 * 1000}, - {L3, 1400 * 1000}, - {L4, 1300 * 1000}, - {L5, 1200 * 1000}, - {L6, 1100 * 1000}, - {L7, 1000 * 1000}, - {L8, 900 * 1000}, - {L9, 800 * 1000}, - {L10, 700 * 1000}, - {L11, 600 * 1000}, - {L12, 500 * 1000}, - {L13, 400 * 1000}, - {L14, 300 * 1000}, - {L15, 200 * 1000}, - {0, CPUFREQ_TABLE_END}, + {0, L0, 1700 * 1000}, + {0, L1, 1600 * 1000}, + {0, L2, 1500 * 1000}, + {0, L3, 1400 * 1000}, + {0, L4, 1300 * 1000}, + {0, L5, 1200 * 1000}, + {0, L6, 1100 * 1000}, + {0, L7, 1000 * 1000}, + {0, L8, 900 * 1000}, + {0, L9, 800 * 1000}, + {0, L10, 700 * 1000}, + {0, L11, 600 * 1000}, + {0, L12, 500 * 1000}, + {0, L13, 400 * 1000}, + {0, L14, 300 * 1000}, + {0, L15, 200 * 1000}, + {0, 0, CPUFREQ_TABLE_END}, }; static struct apll_freq apll_freq_5250[] = { @@ -88,115 +88,86 @@ static void set_clkdiv(unsigned int div_index) tmp = apll_freq_5250[div_index].clk_div_cpu0; - __raw_writel(tmp, EXYNOS5_CLKDIV_CPU0); + __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS5_CLKDIV_CPU0); - while (__raw_readl(EXYNOS5_CLKDIV_STATCPU0) & 0x11111111) + while (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKDIV_STATCPU0) + & 0x11111111) cpu_relax(); /* Change Divider - CPU1 */ tmp = apll_freq_5250[div_index].clk_div_cpu1; - __raw_writel(tmp, EXYNOS5_CLKDIV_CPU1); + __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS5_CLKDIV_CPU1); - while (__raw_readl(EXYNOS5_CLKDIV_STATCPU1) & 0x11) + while (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKDIV_STATCPU1) & 0x11) cpu_relax(); } -static void set_apll(unsigned int new_index, - unsigned int old_index) +static void set_apll(unsigned int index) { - unsigned int tmp, pdiv; + unsigned int tmp; + unsigned int freq = apll_freq_5250[index].freq; - /* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */ + /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */ clk_set_parent(moutcore, mout_mpll); do { cpu_relax(); - tmp = (__raw_readl(EXYNOS5_CLKMUX_STATCPU) >> 16); + tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKMUX_STATCPU) + >> 16); tmp &= 0x7; } while (tmp != 0x2); - /* 2. Set APLL Lock time */ - pdiv = ((apll_freq_5250[new_index].mps >> 8) & 0x3f); - - __raw_writel((pdiv * 250), EXYNOS5_APLL_LOCK); + clk_set_rate(mout_apll, freq * 1000); - /* 3. Change PLL PMS values */ - tmp = __raw_readl(EXYNOS5_APLL_CON0); - tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0)); - tmp |= apll_freq_5250[new_index].mps; - __raw_writel(tmp, EXYNOS5_APLL_CON0); - - /* 4. wait_lock_time */ - do { - cpu_relax(); - tmp = __raw_readl(EXYNOS5_APLL_CON0); - } while (!(tmp & (0x1 << 29))); - - /* 5. MUX_CORE_SEL = APLL */ + /* MUX_CORE_SEL = APLL */ clk_set_parent(moutcore, mout_apll); do { cpu_relax(); - tmp = __raw_readl(EXYNOS5_CLKMUX_STATCPU); + tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKMUX_STATCPU); tmp &= (0x7 << 16); } while (tmp != (0x1 << 16)); - -} - -static bool exynos5250_pms_change(unsigned int old_index, unsigned int new_index) -{ - unsigned int old_pm = apll_freq_5250[old_index].mps >> 8; - unsigned int new_pm = apll_freq_5250[new_index].mps >> 8; - - return (old_pm == new_pm) ? 0 : 1; } static void exynos5250_set_frequency(unsigned int old_index, unsigned int new_index) { - unsigned int tmp; - if (old_index > new_index) { - if (!exynos5250_pms_change(old_index, new_index)) { - /* 1. Change the system clock divider values */ - set_clkdiv(new_index); - /* 2. 
Change just s value in apll m,p,s value */ - tmp = __raw_readl(EXYNOS5_APLL_CON0); - tmp &= ~(0x7 << 0); - tmp |= apll_freq_5250[new_index].mps & 0x7; - __raw_writel(tmp, EXYNOS5_APLL_CON0); - - } else { - /* Clock Configuration Procedure */ - /* 1. Change the system clock divider values */ - set_clkdiv(new_index); - /* 2. Change the apll m,p,s value */ - set_apll(new_index, old_index); - } + set_clkdiv(new_index); + set_apll(new_index); } else if (old_index < new_index) { - if (!exynos5250_pms_change(old_index, new_index)) { - /* 1. Change just s value in apll m,p,s value */ - tmp = __raw_readl(EXYNOS5_APLL_CON0); - tmp &= ~(0x7 << 0); - tmp |= apll_freq_5250[new_index].mps & 0x7; - __raw_writel(tmp, EXYNOS5_APLL_CON0); - /* 2. Change the system clock divider values */ - set_clkdiv(new_index); - } else { - /* Clock Configuration Procedure */ - /* 1. Change the apll m,p,s value */ - set_apll(new_index, old_index); - /* 2. Change the system clock divider values */ - set_clkdiv(new_index); - } + set_apll(new_index); + set_clkdiv(new_index); } } int exynos5250_cpufreq_init(struct exynos_dvfs_info *info) { + struct device_node *np; unsigned long rate; + /* + * HACK: This is a temporary workaround to get access to clock + * controller registers directly and remove static mappings and + * dependencies on platform headers. It is necessary to enable + * Exynos multi-platform support and will be removed together with + * this whole driver as soon as Exynos gets migrated to use + * cpufreq-cpu0 driver. + */ + np = of_find_compatible_node(NULL, NULL, "samsung,exynos5250-clock"); + if (!np) { + pr_err("%s: failed to find clock controller DT node\n", + __func__); + return -ENODEV; + } + + info->cmu_regs = of_iomap(np, 0); + if (!info->cmu_regs) { + pr_err("%s: failed to map CMU registers\n", __func__); + return -EFAULT; + } + cpu_clk = clk_get(NULL, "armclk"); if (IS_ERR(cpu_clk)) return PTR_ERR(cpu_clk); @@ -222,7 +193,8 @@ int exynos5250_cpufreq_init(struct exynos_dvfs_info *info) info->volt_table = exynos5250_volt_table; info->freq_table = exynos5250_freq_table; info->set_freq = exynos5250_set_frequency; - info->need_apll_change = exynos5250_pms_change; + + cpufreq = info; return 0; @@ -236,4 +208,3 @@ err_moutcore: pr_err("%s: failed initialization\n", __func__); return -EINVAL; } -EXPORT_SYMBOL(exynos5250_cpufreq_init); diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c index d514c152fd1..f33f25b483c 100644 --- a/drivers/cpufreq/exynos5440-cpufreq.c +++ b/drivers/cpufreq/exynos5440-cpufreq.c @@ -20,7 +20,7 @@ #include <linux/module.h> #include <linux/of_address.h> #include <linux/of_irq.h> -#include <linux/opp.h> +#include <linux/pm_opp.h> #include <linux/platform_device.h> #include <linux/slab.h> @@ -100,7 +100,6 @@ struct exynos_dvfs_data { struct resource *mem; int irq; struct clk *cpu_clk; - unsigned int cur_frequency; unsigned int latency; struct cpufreq_frequency_table *freq_table; unsigned int freq_count; @@ -115,25 +114,23 @@ static struct cpufreq_freqs freqs; static int init_div_table(void) { - struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table; + struct cpufreq_frequency_table *pos, *freq_tbl = dvfs_info->freq_table; unsigned int tmp, clk_div, ema_div, freq, volt_id; - int i = 0; - struct opp *opp; + struct dev_pm_opp *opp; rcu_read_lock(); - for (i = 0; freq_tbl[i].frequency != CPUFREQ_TABLE_END; i++) { - - opp = opp_find_freq_exact(dvfs_info->dev, - freq_tbl[i].frequency * 1000, true); + cpufreq_for_each_entry(pos, freq_tbl) { + opp = 
dev_pm_opp_find_freq_exact(dvfs_info->dev, + pos->frequency * 1000, true); if (IS_ERR(opp)) { rcu_read_unlock(); dev_err(dvfs_info->dev, "failed to find valid OPP for %u KHZ\n", - freq_tbl[i].frequency); + pos->frequency); return PTR_ERR(opp); } - freq = freq_tbl[i].frequency / 1000; /* In MHZ */ + freq = pos->frequency / 1000; /* In MHZ */ clk_div = ((freq / CPU_DIV_FREQ_MAX) & P0_7_CPUCLKDEV_MASK) << P0_7_CPUCLKDEV_SHIFT; clk_div |= ((freq / CPU_ATB_FREQ_MAX) & P0_7_ATBCLKDEV_MASK) @@ -142,7 +139,7 @@ static int init_div_table(void) << P0_7_CSCLKDEV_SHIFT; /* Calculate EMA */ - volt_id = opp_get_voltage(opp); + volt_id = dev_pm_opp_get_voltage(opp); volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP; if (volt_id < PMIC_HIGH_VOLT) { ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) | @@ -158,17 +155,19 @@ static int init_div_table(void) tmp = (clk_div | ema_div | (volt_id << P0_7_VDD_SHIFT) | ((freq / FREQ_UNIT) << P0_7_FREQ_SHIFT)); - __raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 * i); + __raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 * + (pos - freq_tbl)); } rcu_read_unlock(); return 0; } -static void exynos_enable_dvfs(void) +static void exynos_enable_dvfs(unsigned int cur_frequency) { - unsigned int tmp, i, cpu; + unsigned int tmp, cpu; struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table; + struct cpufreq_frequency_table *pos; /* Disable DVFS */ __raw_writel(0, dvfs_info->base + XMU_DVFS_CTRL); @@ -183,24 +182,24 @@ static void exynos_enable_dvfs(void) __raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN); /* Set initial performance index */ - for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) - if (freq_table[i].frequency == dvfs_info->cur_frequency) + cpufreq_for_each_entry(pos, freq_table) + if (pos->frequency == cur_frequency) break; - if (freq_table[i].frequency == CPUFREQ_TABLE_END) { + if (pos->frequency == CPUFREQ_TABLE_END) { dev_crit(dvfs_info->dev, "Boot up frequency not supported\n"); /* Assign the highest frequency */ - i = 0; - dvfs_info->cur_frequency = freq_table[i].frequency; + pos = freq_table; + cur_frequency = pos->frequency; } dev_info(dvfs_info->dev, "Setting dvfs initial frequency = %uKHZ", - dvfs_info->cur_frequency); + cur_frequency); for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) { tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4); tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT); - tmp |= (i << C0_3_PSTATE_NEW_SHIFT); + tmp |= ((pos - freq_table) << C0_3_PSTATE_NEW_SHIFT); __raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4); } @@ -209,39 +208,18 @@ static void exynos_enable_dvfs(void) dvfs_info->base + XMU_DVFS_CTRL); } -static int exynos_verify_speed(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, - dvfs_info->freq_table); -} - -static unsigned int exynos_getspeed(unsigned int cpu) +static int exynos_target(struct cpufreq_policy *policy, unsigned int index) { - return dvfs_info->cur_frequency; -} - -static int exynos_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int index, tmp; - int ret = 0, i; + unsigned int tmp; + int i; struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table; mutex_lock(&cpufreq_lock); - ret = cpufreq_frequency_table_target(policy, freq_table, - target_freq, relation, &index); - if (ret) - goto out; - - freqs.old = dvfs_info->cur_frequency; + freqs.old = policy->cur; freqs.new = freq_table[index].frequency; - if (freqs.old == freqs.new) - goto out; - - cpufreq_notify_transition(policy, 
&freqs, CPUFREQ_PRECHANGE); + cpufreq_freq_transition_begin(policy, &freqs); /* Set the target frequency in all C0_3_PSTATE register */ for_each_cpu(i, policy->cpus) { @@ -251,9 +229,8 @@ static int exynos_target(struct cpufreq_policy *policy, __raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + i * 4); } -out: mutex_unlock(&cpufreq_lock); - return ret; + return 0; } static void exynos_cpufreq_work(struct work_struct *work) @@ -267,7 +244,7 @@ static void exynos_cpufreq_work(struct work_struct *work) goto skip_work; mutex_lock(&cpufreq_lock); - freqs.old = dvfs_info->cur_frequency; + freqs.old = policy->cur; cur_pstate = __raw_readl(dvfs_info->base + XMU_P_STATUS); if (cur_pstate >> C0_3_PSTATE_VALID_SHIFT & 0x1) @@ -277,12 +254,11 @@ static void exynos_cpufreq_work(struct work_struct *work) if (likely(index < dvfs_info->freq_count)) { freqs.new = freq_table[index].frequency; - dvfs_info->cur_frequency = freqs.new; } else { dev_crit(dvfs_info->dev, "New frequency out of range\n"); - freqs.new = dvfs_info->cur_frequency; + freqs.new = freqs.old; } - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + cpufreq_freq_transition_end(policy, &freqs, 0); cpufreq_cpu_put(policy); mutex_unlock(&cpufreq_lock); @@ -324,30 +300,20 @@ static void exynos_sort_descend_freq_table(void) static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) { - int ret; - - ret = cpufreq_frequency_table_cpuinfo(policy, dvfs_info->freq_table); - if (ret) { - dev_err(dvfs_info->dev, "Invalid frequency table: %d\n", ret); - return ret; - } - - policy->cur = dvfs_info->cur_frequency; - policy->cpuinfo.transition_latency = dvfs_info->latency; - cpumask_setall(policy->cpus); - - cpufreq_frequency_table_get_attr(dvfs_info->freq_table, policy->cpu); - - return 0; + policy->clk = dvfs_info->cpu_clk; + return cpufreq_generic_init(policy, dvfs_info->freq_table, + dvfs_info->latency); } static struct cpufreq_driver exynos_driver = { - .flags = CPUFREQ_STICKY, - .verify = exynos_verify_speed, - .target = exynos_target, - .get = exynos_getspeed, + .flags = CPUFREQ_STICKY | CPUFREQ_ASYNC_NOTIFICATION | + CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = exynos_target, + .get = cpufreq_generic_get, .init = exynos_cpufreq_cpu_init, .name = CPUFREQ_NAME, + .attr = cpufreq_generic_attr, }; static const struct of_device_id exynos_cpufreq_match[] = { @@ -363,6 +329,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev) int ret = -EINVAL; struct device_node *np; struct resource res; + unsigned int cur_frequency; np = pdev->dev.of_node; if (!np) @@ -399,13 +366,14 @@ static int exynos_cpufreq_probe(struct platform_device *pdev) goto err_put_node; } - ret = opp_init_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); + ret = dev_pm_opp_init_cpufreq_table(dvfs_info->dev, + &dvfs_info->freq_table); if (ret) { dev_err(dvfs_info->dev, "failed to init cpufreq table: %d\n", ret); goto err_put_node; } - dvfs_info->freq_count = opp_get_opp_count(dvfs_info->dev); + dvfs_info->freq_count = dev_pm_opp_get_opp_count(dvfs_info->dev); exynos_sort_descend_freq_table(); if (of_property_read_u32(np, "clock-latency", &dvfs_info->latency)) @@ -418,13 +386,13 @@ static int exynos_cpufreq_probe(struct platform_device *pdev) goto err_free_table; } - dvfs_info->cur_frequency = clk_get_rate(dvfs_info->cpu_clk); - if (!dvfs_info->cur_frequency) { + cur_frequency = clk_get_rate(dvfs_info->cpu_clk); + if (!cur_frequency) { dev_err(dvfs_info->dev, "Failed to get clock rate\n"); ret = 
-EINVAL; goto err_free_table; } - dvfs_info->cur_frequency /= 1000; + cur_frequency /= 1000; INIT_WORK(&dvfs_info->irq_work, exynos_cpufreq_work); ret = devm_request_irq(dvfs_info->dev, dvfs_info->irq, @@ -441,7 +409,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev) goto err_free_table; } - exynos_enable_dvfs(); + exynos_enable_dvfs(cur_frequency); ret = cpufreq_register_driver(&exynos_driver); if (ret) { dev_err(dvfs_info->dev, @@ -454,17 +422,17 @@ static int exynos_cpufreq_probe(struct platform_device *pdev) return 0; err_free_table: - opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); + dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); err_put_node: of_node_put(np); - dev_err(dvfs_info->dev, "%s: failed initialization\n", __func__); + dev_err(&pdev->dev, "%s: failed initialization\n", __func__); return ret; } static int exynos_cpufreq_remove(struct platform_device *pdev) { cpufreq_unregister_driver(&exynos_driver); - opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); + dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); return 0; } diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index f111454a7ae..1632981c4b2 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c @@ -21,19 +21,19 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table) { + struct cpufreq_frequency_table *pos; unsigned int min_freq = ~0; unsigned int max_freq = 0; - unsigned int i; + unsigned int freq; - for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { - unsigned int freq = table[i].frequency; - if (freq == CPUFREQ_ENTRY_INVALID) { - pr_debug("table entry %u is invalid, skipping\n", i); + cpufreq_for_each_valid_entry(pos, table) { + freq = pos->frequency; + if (!cpufreq_boost_enabled() + && (pos->flags & CPUFREQ_BOOST_FREQ)) continue; - } - pr_debug("table entry %u: %u kHz, %u driver_data\n", - i, freq, table[i].driver_data); + + pr_debug("table entry %u: %u kHz\n", (int)(pos - table), freq); if (freq < min_freq) min_freq = freq; if (freq > max_freq) @@ -54,31 +54,31 @@ EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo); int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table) { - unsigned int next_larger = ~0; - unsigned int i; - unsigned int count = 0; + struct cpufreq_frequency_table *pos; + unsigned int freq, next_larger = ~0; + bool found = false; pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n", policy->min, policy->max, policy->cpu); - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); + cpufreq_verify_within_cpu_limits(policy); - for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { - unsigned int freq = table[i].frequency; - if (freq == CPUFREQ_ENTRY_INVALID) - continue; - if ((freq >= policy->min) && (freq <= policy->max)) - count++; - else if ((next_larger > freq) && (freq > policy->max)) + cpufreq_for_each_valid_entry(pos, table) { + freq = pos->frequency; + + if ((freq >= policy->min) && (freq <= policy->max)) { + found = true; + break; + } + + if ((next_larger > freq) && (freq > policy->max)) next_larger = freq; } - if (!count) + if (!found) { policy->max = next_larger; - - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); + cpufreq_verify_within_cpu_limits(policy); + } pr_debug("verification lead to (%u - %u kHz) for cpu %u\n", policy->min, policy->max, 
policy->cpu); @@ -87,6 +87,20 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, } EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify); +/* + * Generic routine to verify policy & frequency table, requires driver to set + * policy->freq_table prior to it. + */ +int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy) +{ + struct cpufreq_frequency_table *table = + cpufreq_frequency_get_table(policy->cpu); + if (!table) + return -ENODEV; + + return cpufreq_frequency_table_verify(policy, table); +} +EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify); int cpufreq_frequency_table_target(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table, @@ -102,7 +116,8 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, .driver_data = ~0, .frequency = 0, }; - unsigned int i; + struct cpufreq_frequency_table *pos; + unsigned int freq, i = 0; pr_debug("request for target %u kHz (relation: %u) for cpu %u\n", target_freq, relation, policy->cpu); @@ -116,15 +131,19 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, break; } - for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { - unsigned int freq = table[i].frequency; - if (freq == CPUFREQ_ENTRY_INVALID) - continue; + cpufreq_for_each_valid_entry(pos, table) { + freq = pos->frequency; + + i = pos - table; if ((freq < policy->min) || (freq > policy->max)) continue; + if (freq == target_freq) { + optimal.driver_data = i; + break; + } switch (relation) { case CPUFREQ_RELATION_H: - if (freq <= target_freq) { + if (freq < target_freq) { if (freq >= optimal.frequency) { optimal.frequency = freq; optimal.driver_data = i; @@ -137,7 +156,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, } break; case CPUFREQ_RELATION_L: - if (freq >= target_freq) { + if (freq > target_freq) { if (freq <= optimal.frequency) { optimal.frequency = freq; optimal.driver_data = i; @@ -158,33 +177,60 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, } else *index = optimal.driver_data; - pr_debug("target is %u (%u kHz, %u)\n", *index, table[*index].frequency, - table[*index].driver_data); + pr_debug("target index is %u, freq is:%u kHz\n", *index, + table[*index].frequency); return 0; } EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target); -static DEFINE_PER_CPU(struct cpufreq_frequency_table *, cpufreq_show_table); +int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy, + unsigned int freq) +{ + struct cpufreq_frequency_table *pos, *table; + + table = cpufreq_frequency_get_table(policy->cpu); + if (unlikely(!table)) { + pr_debug("%s: Unable to find frequency table\n", __func__); + return -ENOENT; + } + + cpufreq_for_each_valid_entry(pos, table) + if (pos->frequency == freq) + return pos - table; + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_index); + /** * show_available_freqs - show available frequencies for the specified CPU */ -static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf) +static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf, + bool show_boost) { - unsigned int i = 0; - unsigned int cpu = policy->cpu; ssize_t count = 0; - struct cpufreq_frequency_table *table; + struct cpufreq_frequency_table *pos, *table = policy->freq_table; - if (!per_cpu(cpufreq_show_table, cpu)) + if (!table) return -ENODEV; - table = per_cpu(cpufreq_show_table, cpu); - - for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { - if (table[i].frequency == CPUFREQ_ENTRY_INVALID) + 
cpufreq_for_each_valid_entry(pos, table) { + /* + * show_boost = true and flags = BOOST freq + * display BOOST freqs + * + * show_boost = false and flags = BOOST freq + * show_boost = true and flags != BOOST freq + * continue - do not display anything + * + * show_boost = false and flags != BOOST freq + * display NON BOOST freqs + */ + if (show_boost ^ (pos->flags & CPUFREQ_BOOST_FREQ)) continue; - count += sprintf(&buf[count], "%d ", table[i].frequency); + + count += sprintf(&buf[count], "%d ", pos->frequency); } count += sprintf(&buf[count], "\n"); @@ -192,45 +238,61 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf) } -struct freq_attr cpufreq_freq_attr_scaling_available_freqs = { - .attr = { .name = "scaling_available_frequencies", - .mode = 0444, - }, - .show = show_available_freqs, -}; -EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs); +#define cpufreq_attr_available_freq(_name) \ +struct freq_attr cpufreq_freq_attr_##_name##_freqs = \ +__ATTR_RO(_name##_frequencies) -/* - * if you use these, you must assure that the frequency table is valid - * all the time between get_attr and put_attr! +/** + * scaling_available_frequencies_show - show available normal frequencies for + * the specified CPU */ -void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, - unsigned int cpu) +static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy, + char *buf) { - pr_debug("setting show_table for cpu %u to %p\n", cpu, table); - per_cpu(cpufreq_show_table, cpu) = table; + return show_available_freqs(policy, buf, false); } -EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr); +cpufreq_attr_available_freq(scaling_available); +EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs); -void cpufreq_frequency_table_put_attr(unsigned int cpu) +/** + * scaling_boost_frequencies_show - show available boost frequencies for + * the specified CPU + */ +static ssize_t scaling_boost_frequencies_show(struct cpufreq_policy *policy, + char *buf) { - pr_debug("clearing show_table for cpu %u\n", cpu); - per_cpu(cpufreq_show_table, cpu) = NULL; + return show_available_freqs(policy, buf, true); } -EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr); +cpufreq_attr_available_freq(scaling_boost); +EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_boost_freqs); + +struct freq_attr *cpufreq_generic_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, +#ifdef CONFIG_CPU_FREQ_BOOST_SW + &cpufreq_freq_attr_scaling_boost_freqs, +#endif + NULL, +}; +EXPORT_SYMBOL_GPL(cpufreq_generic_attr); -void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy) +int cpufreq_table_validate_and_show(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table) { - pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n", - policy->cpu, policy->last_cpu); - per_cpu(cpufreq_show_table, policy->cpu) = per_cpu(cpufreq_show_table, - policy->last_cpu); - per_cpu(cpufreq_show_table, policy->last_cpu) = NULL; + int ret = cpufreq_frequency_table_cpuinfo(policy, table); + + if (!ret) + policy->freq_table = table; + + return ret; } +EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show); + +struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu); struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu) { - return per_cpu(cpufreq_show_table, cpu); + struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); + return policy ? 
policy->freq_table : NULL; } EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table); diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c index 70442c7b5e7..1d723dc8880 100644 --- a/drivers/cpufreq/gx-suspmod.c +++ b/drivers/cpufreq/gx-suspmod.c @@ -265,7 +265,7 @@ static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz) freqs.new = new_khz; - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + cpufreq_freq_transition_begin(policy, &freqs); local_irq_save(flags); if (new_khz != stock_freq) { @@ -314,7 +314,7 @@ static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz) gx_params->pci_suscfg = suscfg; - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + cpufreq_freq_transition_end(policy, &freqs, 0); pr_debug("suspend modulation w/ duration of ON:%d us, OFF:%d us\n", gx_params->on_duration * 32, gx_params->off_duration * 32); @@ -401,7 +401,7 @@ static int cpufreq_gx_target(struct cpufreq_policy *policy, static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) { - unsigned int maxfreq, curfreq; + unsigned int maxfreq; if (!policy || policy->cpu != 0) return -ENODEV; @@ -415,10 +415,8 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f]; stock_freq = maxfreq; - curfreq = gx_get_cpuspeed(0); pr_debug("cpu max frequency is %d.\n", maxfreq); - pr_debug("cpu current frequency is %dkHz.\n", curfreq); /* setup basic struct for cpufreq API */ policy->cpu = 0; @@ -428,7 +426,6 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) else policy->min = maxfreq / POLICY_MIN_DIV; policy->max = maxfreq; - policy->cur = curfreq; policy->cpuinfo.min_freq = maxfreq / max_duration; policy->cpuinfo.max_freq = maxfreq; policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c index 794123fcf3e..bf8902a0866 100644 --- a/drivers/cpufreq/highbank-cpufreq.c +++ b/drivers/cpufreq/highbank-cpufreq.c @@ -66,7 +66,8 @@ static int hb_cpufreq_driver_init(void) struct device_node *np; int ret; - if (!of_machine_is_compatible("calxeda,highbank")) + if ((!of_machine_is_compatible("calxeda,highbank")) && + (!of_machine_is_compatible("calxeda,ecx-2000"))) return -ENODEV; cpu_dev = get_cpu_device(0); diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c index 3e14f031717..c30aaa6a54e 100644 --- a/drivers/cpufreq/ia64-acpi-cpufreq.c +++ b/drivers/cpufreq/ia64-acpi-cpufreq.c @@ -141,7 +141,6 @@ processor_set_freq ( { int ret = 0; u32 value = 0; - struct cpufreq_freqs cpufreq_freqs; cpumask_t saved_mask; int retval; @@ -168,13 +167,6 @@ processor_set_freq ( pr_debug("Transitioning from P%d to P%d\n", data->acpi_data.state, state); - /* cpufreq frequency struct */ - cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency; - cpufreq_freqs.new = data->freq_table[state].frequency; - - /* notify cpufreq */ - cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_PRECHANGE); - /* * First we write the target state's 'control' value to the * control_register. 
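The freq_table.c helpers above and the driver conversions around this point show the pattern this series applies everywhere: table validation, policy verification and the sysfs attribute list move into common code, and each driver keeps only a table-indexed setter. A minimal sketch of the resulting driver shape, with hypothetical foo_* names and foo_set_rate() standing in for the platform's clock programming (not a real API):

#include <linux/cpufreq.h>

static int foo_set_rate(unsigned int khz)
{
	/* platform-specific clock programming would go here (assumed) */
	return 0;
}

static struct cpufreq_frequency_table foo_freq_table[] = {
	{ .frequency = 1000000 },		/* kHz */
	{ .frequency =  500000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	policy->cpuinfo.transition_latency = 100000;	/* ns, illustrative */
	/* validates the table against cpuinfo and sets policy->freq_table */
	return cpufreq_table_validate_and_show(policy, foo_freq_table);
}

/* the core resolves target_freq to a table index and issues the
 * PRE/POSTCHANGE notifications; none of that is left in the driver */
static int foo_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	return foo_set_rate(foo_freq_table[index].frequency);
}

static struct cpufreq_driver foo_driver = {
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= foo_target_index,
	.init		= foo_cpufreq_init,
	.attr		= cpufreq_generic_attr,	/* adds scaling_boost_frequencies under CPU_FREQ_BOOST_SW */
	.name		= "foo-cpufreq",
};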
@@ -186,22 +178,11 @@ processor_set_freq ( ret = processor_set_pstate(value); if (ret) { - unsigned int tmp = cpufreq_freqs.new; - cpufreq_notify_transition(policy, &cpufreq_freqs, - CPUFREQ_POSTCHANGE); - cpufreq_freqs.new = cpufreq_freqs.old; - cpufreq_freqs.old = tmp; - cpufreq_notify_transition(policy, &cpufreq_freqs, - CPUFREQ_PRECHANGE); - cpufreq_notify_transition(policy, &cpufreq_freqs, - CPUFREQ_POSTCHANGE); printk(KERN_WARNING "Transition failed with error %d\n", ret); retval = -ENODEV; goto migrate_end; } - cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_POSTCHANGE); - data->acpi_data.state = state; retval = 0; @@ -227,42 +208,11 @@ acpi_cpufreq_get ( static int acpi_cpufreq_target ( struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; - unsigned int next_state = 0; - unsigned int result = 0; - - pr_debug("acpi_cpufreq_setpolicy\n"); - - result = cpufreq_frequency_table_target(policy, - data->freq_table, target_freq, relation, &next_state); - if (result) - return (result); - - result = processor_set_freq(data, policy, next_state); - - return (result); -} - - -static int -acpi_cpufreq_verify ( - struct cpufreq_policy *policy) + unsigned int index) { - unsigned int result = 0; - struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; - - pr_debug("acpi_cpufreq_verify\n"); - - result = cpufreq_frequency_table_verify(policy, - data->freq_table); - - return (result); + return processor_set_freq(acpi_io_data[policy->cpu], policy, index); } - static int acpi_cpufreq_cpu_init ( struct cpufreq_policy *policy) @@ -304,7 +254,7 @@ acpi_cpufreq_cpu_init ( } /* alloc freq_table */ - data->freq_table = kmalloc(sizeof(*data->freq_table) * + data->freq_table = kzalloc(sizeof(*data->freq_table) * (data->acpi_data.state_count + 1), GFP_KERNEL); if (!data->freq_table) { @@ -321,12 +271,10 @@ acpi_cpufreq_cpu_init ( data->acpi_data.states[i].transition_latency * 1000; } } - policy->cur = processor_get_freq(data, policy->cpu); /* table init */ for (i = 0; i <= data->acpi_data.state_count; i++) { - data->freq_table[i].driver_data = i; if (i < data->acpi_data.state_count) { data->freq_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000; @@ -335,7 +283,7 @@ acpi_cpufreq_cpu_init ( } } - result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); + result = cpufreq_table_validate_and_show(policy, data->freq_table); if (result) { goto err_freqfree; } @@ -356,8 +304,6 @@ acpi_cpufreq_cpu_init ( (u32) data->acpi_data.states[i].status, (u32) data->acpi_data.states[i].control); - cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu); - /* the first call to ->target() should result in us actually * writing something to the appropriate registers. 
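* (the data->resume = 1 just below is what forces that first write: processor_set_freq() otherwise returns early when the requested P-state already matches the cached ACPI state)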
*/ data->resume = 1; @@ -385,7 +331,6 @@ acpi_cpufreq_cpu_exit ( pr_debug("acpi_cpufreq_cpu_exit\n"); if (data) { - cpufreq_frequency_table_put_attr(policy->cpu); acpi_io_data[policy->cpu] = NULL; acpi_processor_unregister_performance(&data->acpi_data, policy->cpu); @@ -396,20 +341,14 @@ acpi_cpufreq_cpu_exit ( } -static struct freq_attr* acpi_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - - static struct cpufreq_driver acpi_cpufreq_driver = { - .verify = acpi_cpufreq_verify, - .target = acpi_cpufreq_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = acpi_cpufreq_target, .get = acpi_cpufreq_get, .init = acpi_cpufreq_cpu_init, .exit = acpi_cpufreq_cpu_exit, .name = "acpi-cpufreq", - .attr = acpi_cpufreq_attr, + .attr = cpufreq_generic_attr, }; diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index 3e396543aea..af366c21d4b 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c @@ -7,12 +7,12 @@ */ #include <linux/clk.h> +#include <linux/cpu.h> #include <linux/cpufreq.h> -#include <linux/delay.h> #include <linux/err.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/opp.h> +#include <linux/pm_opp.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> @@ -34,77 +34,53 @@ static struct device *cpu_dev; static struct cpufreq_frequency_table *freq_table; static unsigned int transition_latency; -static int imx6q_verify_speed(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, freq_table); -} - -static unsigned int imx6q_get_speed(unsigned int cpu) -{ - return clk_get_rate(arm_clk) / 1000; -} +static u32 *imx6_soc_volt; +static u32 soc_opp_count; -static int imx6q_set_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) +static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index) { - struct cpufreq_freqs freqs; - struct opp *opp; + struct dev_pm_opp *opp; unsigned long freq_hz, volt, volt_old; - unsigned int index; + unsigned int old_freq, new_freq; int ret; - ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, - relation, &index); - if (ret) { - dev_err(cpu_dev, "failed to match target frequency %d: %d\n", - target_freq, ret); - return ret; - } - - freqs.new = freq_table[index].frequency; - freq_hz = freqs.new * 1000; - freqs.old = clk_get_rate(arm_clk) / 1000; - - if (freqs.old == freqs.new) - return 0; + new_freq = freq_table[index].frequency; + freq_hz = new_freq * 1000; + old_freq = clk_get_rate(arm_clk) / 1000; rcu_read_lock(); - opp = opp_find_freq_ceil(cpu_dev, &freq_hz); + opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz); if (IS_ERR(opp)) { rcu_read_unlock(); dev_err(cpu_dev, "failed to find OPP for %ld\n", freq_hz); return PTR_ERR(opp); } - volt = opp_get_voltage(opp); + volt = dev_pm_opp_get_voltage(opp); rcu_read_unlock(); volt_old = regulator_get_voltage(arm_reg); dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n", - freqs.old / 1000, volt_old / 1000, - freqs.new / 1000, volt / 1000); - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + old_freq / 1000, volt_old / 1000, + new_freq / 1000, volt / 1000); /* scaling up? 
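(i.e. new_freq > old_freq, checked just below):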
scale voltage before frequency */ - if (freqs.new > freqs.old) { + if (new_freq > old_freq) { + ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0); + if (ret) { + dev_err(cpu_dev, "failed to scale vddpu up: %d\n", ret); + return ret; + } + ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0); + if (ret) { + dev_err(cpu_dev, "failed to scale vddsoc up: %d\n", ret); + return ret; + } ret = regulator_set_voltage_tol(arm_reg, volt, 0); if (ret) { dev_err(cpu_dev, "failed to scale vddarm up: %d\n", ret); - freqs.new = freqs.old; - goto post_notify; - } - - /* - * Need to increase vddpu and vddsoc for safety - * if we are about to run at 1.2 GHz. - */ - if (freqs.new == FREQ_1P2_GHZ / 1000) { - regulator_set_voltage_tol(pu_reg, - PU_SOC_VOLTAGE_HIGH, 0); - regulator_set_voltage_tol(soc_reg, - PU_SOC_VOLTAGE_HIGH, 0); + return ret; } } @@ -120,89 +96,72 @@ static int imx6q_set_target(struct cpufreq_policy *policy, clk_set_parent(step_clk, pll2_pfd2_396m_clk); clk_set_parent(pll1_sw_clk, step_clk); if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) { - clk_set_rate(pll1_sys_clk, freqs.new * 1000); + clk_set_rate(pll1_sys_clk, new_freq * 1000); clk_set_parent(pll1_sw_clk, pll1_sys_clk); } /* Ensure the arm clock divider is what we expect */ - ret = clk_set_rate(arm_clk, freqs.new * 1000); + ret = clk_set_rate(arm_clk, new_freq * 1000); if (ret) { dev_err(cpu_dev, "failed to set clock rate: %d\n", ret); regulator_set_voltage_tol(arm_reg, volt_old, 0); - freqs.new = freqs.old; - goto post_notify; + return ret; } /* scaling down? scale voltage after frequency */ - if (freqs.new < freqs.old) { + if (new_freq < old_freq) { ret = regulator_set_voltage_tol(arm_reg, volt, 0); if (ret) { dev_warn(cpu_dev, "failed to scale vddarm down: %d\n", ret); ret = 0; } - - if (freqs.old == FREQ_1P2_GHZ / 1000) { - regulator_set_voltage_tol(pu_reg, - PU_SOC_VOLTAGE_NORMAL, 0); - regulator_set_voltage_tol(soc_reg, - PU_SOC_VOLTAGE_NORMAL, 0); + ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0); + if (ret) { + dev_warn(cpu_dev, "failed to scale vddsoc down: %d\n", ret); + ret = 0; + } + ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0); + if (ret) { + dev_warn(cpu_dev, "failed to scale vddpu down: %d\n", ret); + ret = 0; } } -post_notify: - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - return ret; -} - -static int imx6q_cpufreq_init(struct cpufreq_policy *policy) -{ - int ret; - - ret = cpufreq_frequency_table_cpuinfo(policy, freq_table); - if (ret) { - dev_err(cpu_dev, "invalid frequency table: %d\n", ret); - return ret; - } - - policy->cpuinfo.transition_latency = transition_latency; - policy->cur = clk_get_rate(arm_clk) / 1000; - cpumask_setall(policy->cpus); - cpufreq_frequency_table_get_attr(freq_table, policy->cpu); - return 0; } -static int imx6q_cpufreq_exit(struct cpufreq_policy *policy) +static int imx6q_cpufreq_init(struct cpufreq_policy *policy) { - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; + policy->clk = arm_clk; + return cpufreq_generic_init(policy, freq_table, transition_latency); } -static struct freq_attr *imx6q_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver imx6q_cpufreq_driver = { - .verify = imx6q_verify_speed, - .target = imx6q_set_target, - .get = imx6q_get_speed, + .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = imx6q_set_target, + .get = cpufreq_generic_get, .init = 
imx6q_cpufreq_init, - .exit = imx6q_cpufreq_exit, .name = "imx6q-cpufreq", - .attr = imx6q_cpufreq_attr, + .attr = cpufreq_generic_attr, }; static int imx6q_cpufreq_probe(struct platform_device *pdev) { struct device_node *np; - struct opp *opp; + struct dev_pm_opp *opp; unsigned long min_volt, max_volt; int num, ret; - - cpu_dev = &pdev->dev; + const struct property *prop; + const __be32 *val; + u32 nr, i, j; + + cpu_dev = get_cpu_device(0); + if (!cpu_dev) { + pr_err("failed to get cpu0 device\n"); + return -ENODEV; + } np = of_node_get(cpu_dev->of_node); if (!np) { @@ -210,73 +169,126 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev) return -ENOENT; } - arm_clk = devm_clk_get(cpu_dev, "arm"); - pll1_sys_clk = devm_clk_get(cpu_dev, "pll1_sys"); - pll1_sw_clk = devm_clk_get(cpu_dev, "pll1_sw"); - step_clk = devm_clk_get(cpu_dev, "step"); - pll2_pfd2_396m_clk = devm_clk_get(cpu_dev, "pll2_pfd2_396m"); + arm_clk = clk_get(cpu_dev, "arm"); + pll1_sys_clk = clk_get(cpu_dev, "pll1_sys"); + pll1_sw_clk = clk_get(cpu_dev, "pll1_sw"); + step_clk = clk_get(cpu_dev, "step"); + pll2_pfd2_396m_clk = clk_get(cpu_dev, "pll2_pfd2_396m"); if (IS_ERR(arm_clk) || IS_ERR(pll1_sys_clk) || IS_ERR(pll1_sw_clk) || IS_ERR(step_clk) || IS_ERR(pll2_pfd2_396m_clk)) { dev_err(cpu_dev, "failed to get clocks\n"); ret = -ENOENT; - goto put_node; + goto put_clk; } - arm_reg = devm_regulator_get(cpu_dev, "arm"); - pu_reg = devm_regulator_get(cpu_dev, "pu"); - soc_reg = devm_regulator_get(cpu_dev, "soc"); + arm_reg = regulator_get(cpu_dev, "arm"); + pu_reg = regulator_get(cpu_dev, "pu"); + soc_reg = regulator_get(cpu_dev, "soc"); if (IS_ERR(arm_reg) || IS_ERR(pu_reg) || IS_ERR(soc_reg)) { dev_err(cpu_dev, "failed to get regulators\n"); ret = -ENOENT; - goto put_node; + goto put_reg; } - /* We expect an OPP table supplied by platform */ - num = opp_get_opp_count(cpu_dev); + /* + * We expect an OPP table supplied by the platform. + * Just in case the platform did not supply the OPP + * table, try to get it from the device tree. + */ + num = dev_pm_opp_get_opp_count(cpu_dev); if (num < 0) { - ret = num; - dev_err(cpu_dev, "no OPP table is found: %d\n", ret); - goto put_node; + ret = of_init_opp_table(cpu_dev); + if (ret < 0) { + dev_err(cpu_dev, "failed to init OPP table: %d\n", ret); + goto put_reg; + } + + num = dev_pm_opp_get_opp_count(cpu_dev); + if (num < 0) { + ret = num; + dev_err(cpu_dev, "no OPP table is found: %d\n", ret); + goto put_reg; + } } - ret = opp_init_cpufreq_table(cpu_dev, &freq_table); + ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); if (ret) { dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); - goto put_node; + goto put_reg; + } + + /* Size the imx6_soc_volt array to match the number of ARM OPPs */ + imx6_soc_volt = devm_kzalloc(cpu_dev, sizeof(*imx6_soc_volt) * num, GFP_KERNEL); + if (imx6_soc_volt == NULL) { + ret = -ENOMEM; + goto free_freq_table; + } + + prop = of_find_property(np, "fsl,soc-operating-points", NULL); + if (!prop || !prop->value) + goto soc_opp_out; + + /* + * Each OPP is a set of tuples consisting of frequency and + * voltage like <freq-kHz vol-uV>. 
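+ * For example, a DT property such as + * fsl,soc-operating-points = <792000 1175000>, <996000 1250000>; + * would pair the 792 MHz ARM OPP with a 1.175 V SoC/PU voltage + * (these numbers are illustrative only, not from a particular board).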
+ */ + nr = prop->length / sizeof(u32); + if (nr % 2 || (nr / 2) < num) + goto soc_opp_out; + + for (j = 0; j < num; j++) { + val = prop->value; + for (i = 0; i < nr / 2; i++) { + unsigned long freq = be32_to_cpup(val++); + unsigned long volt = be32_to_cpup(val++); + if (freq_table[j].frequency == freq) { + imx6_soc_volt[soc_opp_count++] = volt; + break; + } + } + } + +soc_opp_out: + /* use fixed soc opp volt if no valid soc opp info found in dtb */ + if (soc_opp_count != num) { + dev_warn(cpu_dev, "can NOT find valid fsl,soc-operating-points property in dtb, use default value!\n"); + for (j = 0; j < num; j++) + imx6_soc_volt[j] = PU_SOC_VOLTAGE_NORMAL; + if (freq_table[num - 1].frequency * 1000 == FREQ_1P2_GHZ) + imx6_soc_volt[num - 1] = PU_SOC_VOLTAGE_HIGH; } if (of_property_read_u32(np, "clock-latency", &transition_latency)) transition_latency = CPUFREQ_ETERNAL; /* + * Calculate the ramp time for max voltage change in the + * VDDSOC and VDDPU regulators. + */ + ret = regulator_set_voltage_time(soc_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]); + if (ret > 0) + transition_latency += ret * 1000; + ret = regulator_set_voltage_time(pu_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]); + if (ret > 0) + transition_latency += ret * 1000; + + /* * OPP is maintained in order of increasing frequency, and * freq_table initialised from OPP is therefore sorted in the * same order. */ rcu_read_lock(); - opp = opp_find_freq_exact(cpu_dev, + opp = dev_pm_opp_find_freq_exact(cpu_dev, freq_table[0].frequency * 1000, true); - min_volt = opp_get_voltage(opp); - opp = opp_find_freq_exact(cpu_dev, + min_volt = dev_pm_opp_get_voltage(opp); + opp = dev_pm_opp_find_freq_exact(cpu_dev, freq_table[--num].frequency * 1000, true); - max_volt = opp_get_voltage(opp); + max_volt = dev_pm_opp_get_voltage(opp); rcu_read_unlock(); ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt); if (ret > 0) transition_latency += ret * 1000; - /* Count vddpu and vddsoc latency in for 1.2 GHz support */ - if (freq_table[num].frequency == FREQ_1P2_GHZ / 1000) { - ret = regulator_set_voltage_time(pu_reg, PU_SOC_VOLTAGE_NORMAL, - PU_SOC_VOLTAGE_HIGH); - if (ret > 0) - transition_latency += ret * 1000; - ret = regulator_set_voltage_time(soc_reg, PU_SOC_VOLTAGE_NORMAL, - PU_SOC_VOLTAGE_HIGH); - if (ret > 0) - transition_latency += ret * 1000; - } - ret = cpufreq_register_driver(&imx6q_cpufreq_driver); if (ret) { dev_err(cpu_dev, "failed register driver: %d\n", ret); @@ -287,8 +299,25 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev) return 0; free_freq_table: - opp_free_cpufreq_table(cpu_dev, &freq_table); -put_node: + dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); +put_reg: + if (!IS_ERR(arm_reg)) + regulator_put(arm_reg); + if (!IS_ERR(pu_reg)) + regulator_put(pu_reg); + if (!IS_ERR(soc_reg)) + regulator_put(soc_reg); +put_clk: + if (!IS_ERR(arm_clk)) + clk_put(arm_clk); + if (!IS_ERR(pll1_sys_clk)) + clk_put(pll1_sys_clk); + if (!IS_ERR(pll1_sw_clk)) + clk_put(pll1_sw_clk); + if (!IS_ERR(step_clk)) + clk_put(step_clk); + if (!IS_ERR(pll2_pfd2_396m_clk)) + clk_put(pll2_pfd2_396m_clk); of_node_put(np); return ret; } @@ -296,7 +325,15 @@ put_node: static int imx6q_cpufreq_remove(struct platform_device *pdev) { cpufreq_unregister_driver(&imx6q_cpufreq_driver); - opp_free_cpufreq_table(cpu_dev, &freq_table); + dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); + regulator_put(arm_reg); + regulator_put(pu_reg); + regulator_put(soc_reg); + clk_put(arm_clk); + clk_put(pll1_sys_clk); + clk_put(pll1_sw_clk); + 
clk_put(step_clk); + clk_put(pll2_pfd2_396m_clk); return 0; } diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c index f7c99df0880..e5122f1bfe7 100644 --- a/drivers/cpufreq/integrator-cpufreq.c +++ b/drivers/cpufreq/integrator-cpufreq.c @@ -15,18 +15,19 @@ #include <linux/smp.h> #include <linux/init.h> #include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/of_address.h> -#include <mach/hardware.h> -#include <mach/platform.h> #include <asm/mach-types.h> #include <asm/hardware/icst.h> -static struct cpufreq_driver integrator_driver; +static void __iomem *cm_base; +/* The cpufreq driver only uses the OSC register */ +#define INTEGRATOR_HDR_OSC_OFFSET 0x08 +#define INTEGRATOR_HDR_LOCK_OFFSET 0x14 -#define CM_ID __io_address(INTEGRATOR_HDR_ID) -#define CM_OSC __io_address(INTEGRATOR_HDR_OSC) -#define CM_STAT __io_address(INTEGRATOR_HDR_STAT) -#define CM_LOCK __io_address(INTEGRATOR_HDR_LOCK) +static struct cpufreq_driver integrator_driver; static const struct icst_params lclk_params = { .ref = 24000000, @@ -59,9 +60,7 @@ static int integrator_verify_policy(struct cpufreq_policy *policy) { struct icst_vco vco; - cpufreq_verify_within_limits(policy, - policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); + cpufreq_verify_within_cpu_limits(policy); vco = icst_hz_to_vco(&cclk_params, policy->max * 1000); policy->max = icst_hz(&cclk_params, vco) / 1000; @@ -69,10 +68,7 @@ static int integrator_verify_policy(struct cpufreq_policy *policy) vco = icst_hz_to_vco(&cclk_params, policy->min * 1000); policy->min = icst_hz(&cclk_params, vco) / 1000; - cpufreq_verify_within_limits(policy, - policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); - + cpufreq_verify_within_cpu_limits(policy); return 0; } @@ -100,7 +96,7 @@ static int integrator_set_target(struct cpufreq_policy *policy, BUG_ON(cpu != smp_processor_id()); /* get current setting */ - cm_osc = __raw_readl(CM_OSC); + cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET); if (machine_is_integrator()) { vco.s = (cm_osc >> 8) & 7; @@ -126,9 +122,9 @@ static int integrator_set_target(struct cpufreq_policy *policy, return 0; } - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + cpufreq_freq_transition_begin(policy, &freqs); - cm_osc = __raw_readl(CM_OSC); + cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET); if (machine_is_integrator()) { cm_osc &= 0xfffff800; @@ -138,16 +134,16 @@ static int integrator_set_target(struct cpufreq_policy *policy, } cm_osc |= vco.v; - __raw_writel(0xa05f, CM_LOCK); - __raw_writel(cm_osc, CM_OSC); - __raw_writel(0, CM_LOCK); + __raw_writel(0xa05f, cm_base + INTEGRATOR_HDR_LOCK_OFFSET); + __raw_writel(cm_osc, cm_base + INTEGRATOR_HDR_OSC_OFFSET); + __raw_writel(0, cm_base + INTEGRATOR_HDR_LOCK_OFFSET); /* * Restore the CPUs allowed mask. */ set_cpus_allowed(current, cpus_allowed); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + cpufreq_freq_transition_end(policy, &freqs, 0); return 0; } @@ -165,7 +161,7 @@ static unsigned int integrator_get(unsigned int cpu) BUG_ON(cpu != smp_processor_id()); /* detect memory etc. 
*/ - cm_osc = __raw_readl(CM_OSC); + cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET); if (machine_is_integrator()) { vco.s = (cm_osc >> 8) & 7; @@ -186,15 +182,15 @@ static int integrator_cpufreq_init(struct cpufreq_policy *policy) { /* set default policy and cpuinfo */ - policy->cpuinfo.max_freq = 160000; - policy->cpuinfo.min_freq = 12000; + policy->max = policy->cpuinfo.max_freq = 160000; + policy->min = policy->cpuinfo.min_freq = 12000; policy->cpuinfo.transition_latency = 1000000; /* 1 ms, assumed */ - policy->cur = policy->min = policy->max = integrator_get(policy->cpu); return 0; } static struct cpufreq_driver integrator_driver = { + .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK, .verify = integrator_verify_policy, .target = integrator_set_target, .get = integrator_get, @@ -202,19 +198,43 @@ static struct cpufreq_driver integrator_driver = { .name = "integrator", }; -static int __init integrator_cpu_init(void) +static int __init integrator_cpufreq_probe(struct platform_device *pdev) { + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + cm_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (!cm_base) + return -ENODEV; + return cpufreq_register_driver(&integrator_driver); } -static void __exit integrator_cpu_exit(void) +static void __exit integrator_cpufreq_remove(struct platform_device *pdev) { cpufreq_unregister_driver(&integrator_driver); } +static const struct of_device_id integrator_cpufreq_match[] = { + { .compatible = "arm,core-module-integrator"}, + { }, +}; + +static struct platform_driver integrator_cpufreq_driver = { + .driver = { + .name = "integrator-cpufreq", + .owner = THIS_MODULE, + .of_match_table = integrator_cpufreq_match, + }, + .remove = __exit_p(integrator_cpufreq_remove), +}; + +module_platform_driver_probe(integrator_cpufreq_driver, + integrator_cpufreq_probe); + MODULE_AUTHOR ("Russell M. 
King"); MODULE_DESCRIPTION ("cpufreq driver for ARM Integrator CPUs"); MODULE_LICENSE ("GPL"); - -module_init(integrator_cpu_init); -module_exit(integrator_cpu_exit); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 9733f29ed14..86631cb6f7d 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -25,18 +25,24 @@ #include <linux/types.h> #include <linux/fs.h> #include <linux/debugfs.h> +#include <linux/acpi.h> #include <trace/events/power.h> #include <asm/div64.h> #include <asm/msr.h> #include <asm/cpu_device_id.h> -#define SAMPLE_COUNT 3 +#define BYT_RATIOS 0x66a +#define BYT_VIDS 0x66b +#define BYT_TURBO_RATIOS 0x66c +#define BYT_TURBO_VIDS 0x66d + #define FRAC_BITS 8 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) #define fp_toint(X) ((X) >> FRAC_BITS) + static inline int32_t mul_fp(int32_t x, int32_t y) { return ((int64_t)x * (int64_t)y) >> FRAC_BITS; @@ -48,10 +54,11 @@ static inline int32_t div_fp(int32_t x, int32_t y) } struct sample { - int core_pct_busy; + int32_t core_pct_busy; u64 aperf; u64 mperf; int freq; + ktime_t time; }; struct pstate_data { @@ -61,6 +68,13 @@ struct pstate_data { int turbo_pstate; }; +struct vid_data { + int min; + int max; + int turbo; + int32_t ratio; +}; + struct _pid { int setpoint; int32_t integral; @@ -68,26 +82,22 @@ struct _pid { int32_t i_gain; int32_t d_gain; int deadband; - int last_err; + int32_t last_err; }; struct cpudata { int cpu; - char name[64]; - struct timer_list timer; - struct pstate_adjust_policy *pstate_policy; struct pstate_data pstate; + struct vid_data vid; struct _pid pid; - int min_pstate_count; - + ktime_t last_sample_time; u64 prev_aperf; u64 prev_mperf; - int sample_ptr; - struct sample samples[SAMPLE_COUNT]; + struct sample sample; }; static struct cpudata **all_cpu_data; @@ -100,17 +110,25 @@ struct pstate_adjust_policy { int i_gain_pct; }; -static struct pstate_adjust_policy default_policy = { - .sample_rate_ms = 10, - .deadband = 0, - .setpoint = 97, - .p_gain_pct = 20, - .d_gain_pct = 0, - .i_gain_pct = 0, +struct pstate_funcs { + int (*get_max)(void); + int (*get_min)(void); + int (*get_turbo)(void); + void (*set)(struct cpudata*, int pstate); + void (*get_vid)(struct cpudata *); +}; + +struct cpu_defaults { + struct pstate_adjust_policy pid_policy; + struct pstate_funcs funcs; }; +static struct pstate_adjust_policy pid_params; +static struct pstate_funcs pstate_funcs; + struct perf_limits { int no_turbo; + int turbo_disabled; int max_perf_pct; int min_perf_pct; int32_t max_perf; @@ -134,7 +152,7 @@ static inline void pid_reset(struct _pid *pid, int setpoint, int busy, pid->setpoint = setpoint; pid->deadband = deadband; pid->integral = int_tofp(integral); - pid->last_err = setpoint - busy; + pid->last_err = int_tofp(setpoint) - int_tofp(busy); } static inline void pid_p_gain_set(struct _pid *pid, int percent) @@ -153,16 +171,15 @@ static inline void pid_d_gain_set(struct _pid *pid, int percent) pid->d_gain = div_fp(int_tofp(percent), int_tofp(100)); } -static signed int pid_calc(struct _pid *pid, int busy) +static signed int pid_calc(struct _pid *pid, int32_t busy) { - signed int err, result; + signed int result; int32_t pterm, dterm, fp_error; int32_t integral_limit; - err = pid->setpoint - busy; - fp_error = int_tofp(err); + fp_error = int_tofp(pid->setpoint) - busy; - if (abs(err) <= pid->deadband) + if (abs(fp_error) <= int_tofp(pid->deadband)) return 0; pterm = mul_fp(pid->p_gain, fp_error); @@ -176,24 +193,24 @@ static signed int pid_calc(struct _pid 
*pid, int busy) if (pid->integral < -integral_limit) pid->integral = -integral_limit; - dterm = mul_fp(pid->d_gain, (err - pid->last_err)); - pid->last_err = err; + dterm = mul_fp(pid->d_gain, fp_error - pid->last_err); + pid->last_err = fp_error; result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm; - + result = result + (1 << (FRAC_BITS-1)); return (signed int)fp_toint(result); } static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu) { - pid_p_gain_set(&cpu->pid, cpu->pstate_policy->p_gain_pct); - pid_d_gain_set(&cpu->pid, cpu->pstate_policy->d_gain_pct); - pid_i_gain_set(&cpu->pid, cpu->pstate_policy->i_gain_pct); + pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct); + pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct); + pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct); pid_reset(&cpu->pid, - cpu->pstate_policy->setpoint, + pid_params.setpoint, 100, - cpu->pstate_policy->deadband, + pid_params.deadband, 0); } @@ -227,12 +244,12 @@ struct pid_param { }; static struct pid_param pid_files[] = { - {"sample_rate_ms", &default_policy.sample_rate_ms}, - {"d_gain_pct", &default_policy.d_gain_pct}, - {"i_gain_pct", &default_policy.i_gain_pct}, - {"deadband", &default_policy.deadband}, - {"setpoint", &default_policy.setpoint}, - {"p_gain_pct", &default_policy.p_gain_pct}, + {"sample_rate_ms", &pid_params.sample_rate_ms}, + {"d_gain_pct", &pid_params.d_gain_pct}, + {"i_gain_pct", &pid_params.i_gain_pct}, + {"deadband", &pid_params.deadband}, + {"setpoint", &pid_params.setpoint}, + {"p_gain_pct", &pid_params.p_gain_pct}, {NULL, NULL} }; @@ -271,7 +288,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, if (ret != 1) return -EINVAL; limits.no_turbo = clamp_t(int, input, 0 , 1); - + if (limits.turbo_disabled) { + pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); + limits.no_turbo = limits.turbo_disabled; + } return count; } @@ -337,42 +357,153 @@ static void intel_pstate_sysfs_expose_params(void) } /************************** sysfs end ************************/ +static int byt_get_min_pstate(void) +{ + u64 value; + rdmsrl(BYT_RATIOS, value); + return (value >> 8) & 0x7F; +} + +static int byt_get_max_pstate(void) +{ + u64 value; + rdmsrl(BYT_RATIOS, value); + return (value >> 16) & 0x7F; +} + +static int byt_get_turbo_pstate(void) +{ + u64 value; + rdmsrl(BYT_TURBO_RATIOS, value); + return value & 0x7F; +} + +static void byt_set_pstate(struct cpudata *cpudata, int pstate) +{ + u64 val; + int32_t vid_fp; + u32 vid; + + val = pstate << 8; + if (limits.no_turbo && !limits.turbo_disabled) + val |= (u64)1 << 32; + + vid_fp = cpudata->vid.min + mul_fp( + int_tofp(pstate - cpudata->pstate.min_pstate), + cpudata->vid.ratio); + + vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max); + vid = fp_toint(vid_fp); + + if (pstate > cpudata->pstate.max_pstate) + vid = cpudata->vid.turbo; -static int intel_pstate_min_pstate(void) + val |= vid; + + wrmsrl(MSR_IA32_PERF_CTL, val); +} + +static void byt_get_vid(struct cpudata *cpudata) +{ + u64 value; + + + rdmsrl(BYT_VIDS, value); + cpudata->vid.min = int_tofp((value >> 8) & 0x7f); + cpudata->vid.max = int_tofp((value >> 16) & 0x7f); + cpudata->vid.ratio = div_fp( + cpudata->vid.max - cpudata->vid.min, + int_tofp(cpudata->pstate.max_pstate - + cpudata->pstate.min_pstate)); + + rdmsrl(BYT_TURBO_VIDS, value); + cpudata->vid.turbo = value & 0x7f; +} + + +static int core_get_min_pstate(void) { u64 value; rdmsrl(MSR_PLATFORM_INFO, value); return (value >> 40) & 0xFF; } -static int 
intel_pstate_max_pstate(void) +static int core_get_max_pstate(void) { u64 value; rdmsrl(MSR_PLATFORM_INFO, value); return (value >> 8) & 0xFF; } -static int intel_pstate_turbo_pstate(void) +static int core_get_turbo_pstate(void) { u64 value; int nont, ret; rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value); - nont = intel_pstate_max_pstate(); + nont = core_get_max_pstate(); ret = ((value) & 255); if (ret <= nont) ret = nont; return ret; } +static void core_set_pstate(struct cpudata *cpudata, int pstate) +{ + u64 val; + + val = pstate << 8; + if (limits.no_turbo && !limits.turbo_disabled) + val |= (u64)1 << 32; + + wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val); +} + +static struct cpu_defaults core_params = { + .pid_policy = { + .sample_rate_ms = 10, + .deadband = 0, + .setpoint = 97, + .p_gain_pct = 20, + .d_gain_pct = 0, + .i_gain_pct = 0, + }, + .funcs = { + .get_max = core_get_max_pstate, + .get_min = core_get_min_pstate, + .get_turbo = core_get_turbo_pstate, + .set = core_set_pstate, + }, +}; + +static struct cpu_defaults byt_params = { + .pid_policy = { + .sample_rate_ms = 10, + .deadband = 0, + .setpoint = 97, + .p_gain_pct = 14, + .d_gain_pct = 0, + .i_gain_pct = 4, + }, + .funcs = { + .get_max = byt_get_max_pstate, + .get_min = byt_get_min_pstate, + .get_turbo = byt_get_turbo_pstate, + .set = byt_set_pstate, + .get_vid = byt_get_vid, + }, +}; + + static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) { int max_perf = cpu->pstate.turbo_pstate; + int max_perf_adj; int min_perf; if (limits.no_turbo) max_perf = cpu->pstate.max_pstate; - max_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf)); - *max = clamp_t(int, max_perf, + max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf)); + *max = clamp_t(int, max_perf_adj, cpu->pstate.min_pstate, cpu->pstate.turbo_pstate); min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf)); @@ -394,8 +525,8 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) trace_cpu_frequency(pstate * 100000, cpu->cpu); cpu->pstate.current_pstate = pstate; - wrmsrl(MSR_IA32_PERF_CTL, pstate << 8); + pstate_funcs.set(cpu, pstate); } static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps) @@ -415,27 +546,31 @@ static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps) static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) { - sprintf(cpu->name, "Intel 2nd generation core"); - - cpu->pstate.min_pstate = intel_pstate_min_pstate(); - cpu->pstate.max_pstate = intel_pstate_max_pstate(); - cpu->pstate.turbo_pstate = intel_pstate_turbo_pstate(); + cpu->pstate.min_pstate = pstate_funcs.get_min(); + cpu->pstate.max_pstate = pstate_funcs.get_max(); + cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); - /* - * goto max pstate so we don't slow up boot if we are built-in if we are - * a module we will take care of it during normal operation - */ - intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); + if (pstate_funcs.get_vid) + pstate_funcs.get_vid(cpu); + intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); } -static inline void intel_pstate_calc_busy(struct cpudata *cpu, - struct sample *sample) +static inline void intel_pstate_calc_busy(struct cpudata *cpu) { - u64 core_pct; - core_pct = div64_u64(sample->aperf * 100, sample->mperf); - sample->freq = cpu->pstate.max_pstate * core_pct * 1000; + struct sample *sample = &cpu->sample; + int64_t core_pct; + int32_t rem; + + core_pct = int_tofp(sample->aperf) * int_tofp(100); + core_pct = div_u64_rem(core_pct, 
int_tofp(sample->mperf), &rem); + + if ((rem << 1) >= int_tofp(sample->mperf)) + core_pct += 1; - sample->core_pct_busy = core_pct; + sample->freq = fp_toint( + mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct)); + + sample->core_pct_busy = (int32_t)core_pct; } static inline void intel_pstate_sample(struct cpudata *cpu) @@ -444,13 +579,18 @@ static inline void intel_pstate_sample(struct cpudata *cpu) rdmsrl(MSR_IA32_APERF, aperf); rdmsrl(MSR_IA32_MPERF, mperf); - cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT; - cpu->samples[cpu->sample_ptr].aperf = aperf; - cpu->samples[cpu->sample_ptr].mperf = mperf; - cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf; - cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf; - intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]); + aperf = aperf >> FRAC_BITS; + mperf = mperf >> FRAC_BITS; + + cpu->last_sample_time = cpu->sample.time; + cpu->sample.time = ktime_get(); + cpu->sample.aperf = aperf; + cpu->sample.mperf = mperf; + cpu->sample.aperf -= cpu->prev_aperf; + cpu->sample.mperf -= cpu->prev_mperf; + + intel_pstate_calc_busy(cpu); cpu->prev_aperf = aperf; cpu->prev_mperf = mperf; @@ -460,27 +600,37 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu) { int sample_time, delay; - sample_time = cpu->pstate_policy->sample_rate_ms; + sample_time = pid_params.sample_rate_ms; delay = msecs_to_jiffies(sample_time); mod_timer_pinned(&cpu->timer, jiffies + delay); } -static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu) +static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu) { - int32_t busy_scaled; - int32_t core_busy, max_pstate, current_pstate; + int32_t core_busy, max_pstate, current_pstate, sample_ratio; + u32 duration_us; + u32 sample_time; - core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy); + core_busy = cpu->sample.core_pct_busy; max_pstate = int_tofp(cpu->pstate.max_pstate); current_pstate = int_tofp(cpu->pstate.current_pstate); - busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate)); + core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate)); + + sample_time = (pid_params.sample_rate_ms * USEC_PER_MSEC); + duration_us = (u32) ktime_us_delta(cpu->sample.time, + cpu->last_sample_time); + if (duration_us > sample_time * 3) { + sample_ratio = div_fp(int_tofp(sample_time), + int_tofp(duration_us)); + core_busy = mul_fp(core_busy, sample_ratio); + } - return fp_toint(busy_scaled); + return core_busy; } static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) { - int busy_scaled; + int32_t busy_scaled; struct _pid *pid; signed int ctl = 0; int steps; @@ -491,6 +641,7 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) ctl = pid_calc(pid, busy_scaled); steps = abs(ctl); + if (ctl < 0) intel_pstate_pstate_increase(cpu, steps); else @@ -500,58 +651,58 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) static void intel_pstate_timer_func(unsigned long __data) { struct cpudata *cpu = (struct cpudata *) __data; + struct sample *sample; intel_pstate_sample(cpu); + + sample = &cpu->sample; + intel_pstate_adjust_busy_pstate(cpu); - if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) { - cpu->min_pstate_count++; - if (!(cpu->min_pstate_count % 5)) { - intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); - } - } else - cpu->min_pstate_count = 0; + trace_pstate_sample(fp_toint(sample->core_pct_busy), + fp_toint(intel_pstate_get_scaled_busy(cpu)), + cpu->pstate.current_pstate, + 
sample->mperf, + sample->aperf, + sample->freq); intel_pstate_set_sample_time(cpu); } #define ICPU(model, policy) \ - { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy } + { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\ + (unsigned long)&policy } static const struct x86_cpu_id intel_pstate_cpu_ids[] = { - ICPU(0x2a, default_policy), - ICPU(0x2d, default_policy), - ICPU(0x3a, default_policy), - ICPU(0x3c, default_policy), - ICPU(0x3e, default_policy), - ICPU(0x3f, default_policy), - ICPU(0x45, default_policy), - ICPU(0x46, default_policy), + ICPU(0x2a, core_params), + ICPU(0x2d, core_params), + ICPU(0x37, byt_params), + ICPU(0x3a, core_params), + ICPU(0x3c, core_params), + ICPU(0x3d, core_params), + ICPU(0x3e, core_params), + ICPU(0x3f, core_params), + ICPU(0x45, core_params), + ICPU(0x46, core_params), + ICPU(0x4f, core_params), + ICPU(0x56, core_params), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); static int intel_pstate_init_cpu(unsigned int cpunum) { - - const struct x86_cpu_id *id; struct cpudata *cpu; - id = x86_match_cpu(intel_pstate_cpu_ids); - if (!id) - return -ENODEV; - all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL); if (!all_cpu_data[cpunum]) return -ENOMEM; cpu = all_cpu_data[cpunum]; + cpu->cpu = cpunum; intel_pstate_get_cpu_pstates(cpu); - cpu->cpu = cpunum; - cpu->pstate_policy = - (struct pstate_adjust_policy *)id->driver_data; init_timer_deferrable(&cpu->timer); cpu->timer.function = intel_pstate_timer_func; cpu->timer.data = @@ -559,7 +710,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum) cpu->timer.expires = jiffies + HZ/100; intel_pstate_busy_pid_reset(cpu); intel_pstate_sample(cpu); - intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); add_timer_on(&cpu->timer, cpunum); @@ -576,7 +726,7 @@ static unsigned int intel_pstate_get(unsigned int cpu_num) cpu = all_cpu_data[cpu_num]; if (!cpu) return 0; - sample = &cpu->samples[cpu->sample_ptr]; + sample = &cpu->sample; return sample->freq; } @@ -594,7 +744,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) limits.min_perf = int_tofp(1); limits.max_perf_pct = 100; limits.max_perf = int_tofp(1); - limits.no_turbo = 0; + limits.no_turbo = limits.turbo_disabled; return 0; } limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; @@ -611,9 +761,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) static int intel_pstate_verify_policy(struct cpufreq_policy *policy) { - cpufreq_verify_within_limits(policy, - policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); + cpufreq_verify_within_cpu_limits(policy); if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) && (policy->policy != CPUFREQ_POLICY_PERFORMANCE)) @@ -622,20 +770,24 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy) return 0; } -static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) +static void intel_pstate_stop_cpu(struct cpufreq_policy *policy) { - int cpu = policy->cpu; + int cpu_num = policy->cpu; + struct cpudata *cpu = all_cpu_data[cpu_num]; - del_timer(&all_cpu_data[cpu]->timer); - kfree(all_cpu_data[cpu]); - all_cpu_data[cpu] = NULL; - return 0; + pr_info("intel_pstate CPU %d exiting\n", cpu_num); + + del_timer_sync(&all_cpu_data[cpu_num]->timer); + intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); + kfree(all_cpu_data[cpu_num]); + all_cpu_data[cpu_num] = NULL; } static int intel_pstate_cpu_init(struct cpufreq_policy *policy) { - int rc, min_pstate, max_pstate; struct cpudata *cpu; + int rc; + u64 misc_en; rc = 
intel_pstate_init_cpu(policy->cpu); if (rc) @@ -643,15 +795,19 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy) cpu = all_cpu_data[policy->cpu]; - if (!limits.no_turbo && - limits.min_perf_pct == 100 && limits.max_perf_pct == 100) + rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); + if (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || + cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) { + limits.turbo_disabled = 1; + limits.no_turbo = 1; + } + if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100) policy->policy = CPUFREQ_POLICY_PERFORMANCE; else policy->policy = CPUFREQ_POLICY_POWERSAVE; - intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate); - policy->min = min_pstate * 100000; - policy->max = max_pstate * 100000; + policy->min = cpu->pstate.min_pstate * 100000; + policy->max = cpu->pstate.turbo_pstate * 100000; /* cpuinfo and default policy values */ policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000; @@ -668,7 +824,7 @@ static struct cpufreq_driver intel_pstate_driver = { .setpolicy = intel_pstate_set_policy, .get = intel_pstate_get, .init = intel_pstate_cpu_init, - .exit = intel_pstate_cpu_exit, + .stop_cpu = intel_pstate_stop_cpu, .name = "intel_pstate", }; @@ -682,9 +838,9 @@ static int intel_pstate_msrs_not_valid(void) rdmsrl(MSR_IA32_APERF, aperf); rdmsrl(MSR_IA32_MPERF, mperf); - if (!intel_pstate_min_pstate() || - !intel_pstate_max_pstate() || - !intel_pstate_turbo_pstate()) + if (!pstate_funcs.get_max() || + !pstate_funcs.get_min() || + !pstate_funcs.get_turbo()) return -ENODEV; rdmsrl(MSR_IA32_APERF, tmp); @@ -697,10 +853,97 @@ static int intel_pstate_msrs_not_valid(void) return 0; } + +static void copy_pid_params(struct pstate_adjust_policy *policy) +{ + pid_params.sample_rate_ms = policy->sample_rate_ms; + pid_params.p_gain_pct = policy->p_gain_pct; + pid_params.i_gain_pct = policy->i_gain_pct; + pid_params.d_gain_pct = policy->d_gain_pct; + pid_params.deadband = policy->deadband; + pid_params.setpoint = policy->setpoint; +} + +static void copy_cpu_funcs(struct pstate_funcs *funcs) +{ + pstate_funcs.get_max = funcs->get_max; + pstate_funcs.get_min = funcs->get_min; + pstate_funcs.get_turbo = funcs->get_turbo; + pstate_funcs.set = funcs->set; + pstate_funcs.get_vid = funcs->get_vid; +} + +#if IS_ENABLED(CONFIG_ACPI) +#include <acpi/processor.h> + +static bool intel_pstate_no_acpi_pss(void) +{ + int i; + + for_each_possible_cpu(i) { + acpi_status status; + union acpi_object *pss; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_processor *pr = per_cpu(processors, i); + + if (!pr) + continue; + + status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); + if (ACPI_FAILURE(status)) + continue; + + pss = buffer.pointer; + if (pss && pss->type == ACPI_TYPE_PACKAGE) { + kfree(pss); + return false; + } + + kfree(pss); + } + + return true; +} + +struct hw_vendor_info { + u16 valid; + char oem_id[ACPI_OEM_ID_SIZE]; + char oem_table_id[ACPI_OEM_TABLE_ID_SIZE]; +}; + +/* Hardware vendor-specific info that has its own power management modes */ +static struct hw_vendor_info vendor_info[] = { + {1, "HP ", "ProLiant"}, + {0, "", ""}, +}; + +static bool intel_pstate_platform_pwr_mgmt_exists(void) +{ + struct acpi_table_header hdr; + struct hw_vendor_info *v_info; + + if (acpi_disabled + || ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr))) + return false; + + for (v_info = vendor_info; v_info->valid; v_info++) { + if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) + && !strncmp(hdr.oem_table_id, 
v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) + && intel_pstate_no_acpi_pss()) + return true; + } + + return false; +} +#else /* CONFIG_ACPI not enabled */ +static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; } +#endif /* CONFIG_ACPI */ + static int __init intel_pstate_init(void) { int cpu, rc = 0; const struct x86_cpu_id *id; + struct cpu_defaults *cpu_info; if (no_load) return -ENODEV; @@ -709,6 +952,18 @@ static int __init intel_pstate_init(void) if (!id) return -ENODEV; + /* + * The Intel pstate driver will be ignored if the platform + * firmware has its own power management modes. + */ + if (intel_pstate_platform_pwr_mgmt_exists()) + return -ENODEV; + + cpu_info = (struct cpu_defaults *)id->driver_data; + + copy_pid_params(&cpu_info->pid_policy); + copy_cpu_funcs(&cpu_info->funcs); + if (intel_pstate_msrs_not_valid()) return -ENODEV; @@ -724,6 +979,7 @@ static int __init intel_pstate_init(void) intel_pstate_debug_expose_params(); intel_pstate_sysfs_expose_params(); + return rc; out: get_online_cpus(); diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c index ba10658a939..37a480680cd 100644 --- a/drivers/cpufreq/kirkwood-cpufreq.c +++ b/drivers/cpufreq/kirkwood-cpufreq.c @@ -43,9 +43,9 @@ static struct priv * table. */ static struct cpufreq_frequency_table kirkwood_freq_table[] = { - {STATE_CPU_FREQ, 0}, /* CPU uses cpuclk */ - {STATE_DDR_FREQ, 0}, /* CPU uses ddrclk */ - {0, CPUFREQ_TABLE_END}, + {0, STATE_CPU_FREQ, 0}, /* CPU uses cpuclk */ + {0, STATE_DDR_FREQ, 0}, /* CPU uses ddrclk */ + {0, 0, CPUFREQ_TABLE_END}, }; static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu) @@ -55,69 +55,37 @@ static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu) return kirkwood_freq_table[0].frequency; } -static void kirkwood_cpufreq_set_cpu_state(struct cpufreq_policy *policy, - unsigned int index) +static int kirkwood_cpufreq_target(struct cpufreq_policy *policy, + unsigned int index) { - struct cpufreq_freqs freqs; unsigned int state = kirkwood_freq_table[index].driver_data; unsigned long reg; - freqs.old = kirkwood_cpufreq_get_cpu_frequency(0); - freqs.new = kirkwood_freq_table[index].frequency; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - - dev_dbg(priv.dev, "Attempting to set frequency to %i KHz\n", - kirkwood_freq_table[index].frequency); - dev_dbg(priv.dev, "old frequency was %i KHz\n", - kirkwood_cpufreq_get_cpu_frequency(0)); - - if (freqs.old != freqs.new) { - local_irq_disable(); - - /* Disable interrupts to the CPU */ - reg = readl_relaxed(priv.base); - reg |= CPU_SW_INT_BLK; - writel_relaxed(reg, priv.base); - - switch (state) { - case STATE_CPU_FREQ: - clk_disable(priv.powersave_clk); - break; - case STATE_DDR_FREQ: - clk_enable(priv.powersave_clk); - break; - } + local_irq_disable(); - /* Wait-for-Interrupt, while the hardware changes frequency */ - cpu_do_idle(); + /* Disable interrupts to the CPU */ + reg = readl_relaxed(priv.base); + reg |= CPU_SW_INT_BLK; + writel_relaxed(reg, priv.base); - /* Enable interrupts to the CPU */ - reg = readl_relaxed(priv.base); - reg &= ~CPU_SW_INT_BLK; - writel_relaxed(reg, priv.base); - - local_irq_enable(); + switch (state) { + case STATE_CPU_FREQ: + clk_disable(priv.powersave_clk); + break; + case STATE_DDR_FREQ: + clk_enable(priv.powersave_clk); + break; } - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); -}; - -static int kirkwood_cpufreq_verify(struct cpufreq_policy *policy) -{ - return 
cpufreq_frequency_table_verify(policy, kirkwood_freq_table); -} -static int kirkwood_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int index = 0; + /* Wait-for-Interrupt, while the hardware changes frequency */ + cpu_do_idle(); - if (cpufreq_frequency_table_target(policy, kirkwood_freq_table, - target_freq, relation, &index)) - return -EINVAL; + /* Enable interrupts to the CPU */ + reg = readl_relaxed(priv.base); + reg &= ~CPU_SW_INT_BLK; + writel_relaxed(reg, priv.base); - kirkwood_cpufreq_set_cpu_state(policy, index); + local_irq_enable(); return 0; } @@ -125,40 +93,17 @@ static int kirkwood_cpufreq_target(struct cpufreq_policy *policy, /* Module init and exit code */ static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy) { - int result; - - /* cpuinfo and default policy values */ - policy->cpuinfo.transition_latency = 5000; /* 5uS */ - policy->cur = kirkwood_cpufreq_get_cpu_frequency(0); - - result = cpufreq_frequency_table_cpuinfo(policy, kirkwood_freq_table); - if (result) - return result; - - cpufreq_frequency_table_get_attr(kirkwood_freq_table, policy->cpu); - - return 0; -} - -static int kirkwood_cpufreq_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; + return cpufreq_generic_init(policy, kirkwood_freq_table, 5000); } -static struct freq_attr *kirkwood_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver kirkwood_cpufreq_driver = { + .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK, .get = kirkwood_cpufreq_get_cpu_frequency, - .verify = kirkwood_cpufreq_verify, - .target = kirkwood_cpufreq_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = kirkwood_cpufreq_target, .init = kirkwood_cpufreq_cpu_init, - .exit = kirkwood_cpufreq_cpu_exit, .name = "kirkwood-cpufreq", - .attr = kirkwood_cpufreq_attr, + .attr = cpufreq_generic_attr, }; static int kirkwood_cpufreq_probe(struct platform_device *pdev) diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index 4ada1cccb05..c913906a719 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c @@ -242,7 +242,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index, * Sets a new clock ratio. */ -static void longhaul_setstate(struct cpufreq_policy *policy, +static int longhaul_setstate(struct cpufreq_policy *policy, unsigned int table_index) { unsigned int mults_index; @@ -258,10 +258,12 @@ static void longhaul_setstate(struct cpufreq_policy *policy, /* Safety precautions */ mult = mults[mults_index & 0x1f]; if (mult == -1) - return; + return -EINVAL; + speed = calc_speed(mult); if ((speed > highest_speed) || (speed < lowest_speed)) - return; + return -EINVAL; + /* Voltage transition before frequency transition? 
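
The hunk above turns longhaul_setstate() from a void function that silently bailed out of its safety checks into one that reports why it gave up: -EINVAL for an out-of-range multiplier or computed speed, and -EBUSY when the PCI bus never goes idle, so the failure can propagate through longhaul_target() to the cpufreq core. A minimal sketch of that conversion; every name below is a stand-in, not longhaul's own code:

    #include <linux/cpufreq.h>
    #include <linux/errno.h>

    static bool state_is_safe(unsigned int index) { return true; }   /* stub */
    static bool bus_went_idle(void) { return true; }                 /* stub */
    static void program_hardware(unsigned int index) { }             /* stub */

    /* Error-returning setter: each early exit now tells the caller why. */
    static int mydrv_setstate(struct cpufreq_policy *policy, unsigned int index)
    {
            if (!state_is_safe(index))      /* multiplier/speed sanity checks */
                    return -EINVAL;

            program_hardware(index);

            if (!bus_went_idle())           /* timed out waiting for idle bus */
                    return -EBUSY;

            return 0;
    }
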
*/ if (can_scale_voltage && longhaul_index < table_index) dir = 1; @@ -269,8 +271,6 @@ static void longhaul_setstate(struct cpufreq_policy *policy, freqs.old = calc_speed(longhaul_get_cpu_mult()); freqs.new = speed; - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", fsb, mult/10, mult%10, print_speed(speed/1000)); retry_loop: @@ -385,12 +385,14 @@ retry_loop: goto retry_loop; } } - /* Report true CPU frequency */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - if (!bm_timeout) + if (!bm_timeout) { printk(KERN_INFO PFX "Warning: Timeout while waiting for " "idle PCI bus.\n"); + return -EBUSY; + } + + return 0; } /* @@ -475,7 +477,7 @@ static int longhaul_get_ranges(void) return -EINVAL; } - longhaul_table = kmalloc((numscales + 1) * sizeof(*longhaul_table), + longhaul_table = kzalloc((numscales + 1) * sizeof(*longhaul_table), GFP_KERNEL); if (!longhaul_table) return -ENOMEM; @@ -528,6 +530,7 @@ static int longhaul_get_ranges(void) static void longhaul_setup_voltagescaling(void) { + struct cpufreq_frequency_table *freq_pos; union msr_longhaul longhaul; struct mV_pos minvid, maxvid, vid; unsigned int j, speed, pos, kHz_step, numvscales; @@ -606,18 +609,16 @@ static void longhaul_setup_voltagescaling(void) /* Calculate kHz for one voltage step */ kHz_step = (highest_speed - min_vid_speed) / numvscales; - j = 0; - while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) { - speed = longhaul_table[j].frequency; + cpufreq_for_each_entry(freq_pos, longhaul_table) { + speed = freq_pos->frequency; if (speed > min_vid_speed) pos = (speed - min_vid_speed) / kHz_step + minvid.pos; else pos = minvid.pos; - longhaul_table[j].driver_data |= mV_vrm_table[pos] << 8; + freq_pos->driver_data |= mV_vrm_table[pos] << 8; vid = vrm_mV_table[mV_vrm_table[pos]]; printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n", - speed, j, vid.mV); - j++; + speed, (int)(freq_pos - longhaul_table), vid.mV); } can_scale_voltage = 1; @@ -625,30 +626,16 @@ static void longhaul_setup_voltagescaling(void) } -static int longhaul_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, longhaul_table); -} - - static int longhaul_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) + unsigned int table_index) { - unsigned int table_index = 0; unsigned int i; unsigned int dir = 0; u8 vid, current_vid; - - if (cpufreq_frequency_table_target(policy, longhaul_table, target_freq, - relation, &table_index)) - return -EINVAL; - - /* Don't set same frequency again */ - if (longhaul_index == table_index) - return 0; + int retval = 0; if (!can_scale_voltage) - longhaul_setstate(policy, table_index); + retval = longhaul_setstate(policy, table_index); else { /* On test system voltage transitions exceeding single * step up or down were turning motherboard off. 
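
Because of that constraint, longhaul_target() never jumps straight to the requested state while voltage scaling is active: it walks the table one index at a time and sleeps whenever the VID changes, as the surrounding hunks show. A simplified sketch of the walk (the real loop pauses only on actual VID changes; mydrv_setstate() is the placeholder setter sketched earlier):

    #include <linux/cpufreq.h>
    #include <linux/delay.h>

    static int mydrv_setstate(struct cpufreq_policy *policy, unsigned int index);

    static int walk_to_index(struct cpufreq_policy *policy,
                             unsigned int cur, unsigned int target)
    {
            int ret = 0;

            while (cur != target && !ret) {
                    cur += (cur < target) ? 1 : -1; /* one table entry per step */
                    ret = mydrv_setstate(policy, cur);
                    msleep(200);                    /* let the voltage settle */
            }

            return ret;
    }
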
Both @@ -663,7 +650,7 @@ static int longhaul_target(struct cpufreq_policy *policy, while (i != table_index) { vid = (longhaul_table[i].driver_data >> 8) & 0x1f; if (vid != current_vid) { - longhaul_setstate(policy, i); + retval = longhaul_setstate(policy, i); current_vid = vid; msleep(200); } @@ -672,10 +659,11 @@ static int longhaul_target(struct cpufreq_policy *policy, else i--; } - longhaul_setstate(policy, table_index); + retval = longhaul_setstate(policy, table_index); } + longhaul_index = table_index; - return 0; + return retval; } @@ -919,36 +907,17 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy) longhaul_setup_voltagescaling(); policy->cpuinfo.transition_latency = 200000; /* nsec */ - policy->cur = calc_speed(longhaul_get_cpu_mult()); - - ret = cpufreq_frequency_table_cpuinfo(policy, longhaul_table); - if (ret) - return ret; - - cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu); - - return 0; -} -static int longhaul_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; + return cpufreq_table_validate_and_show(policy, longhaul_table); } -static struct freq_attr *longhaul_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver longhaul_driver = { - .verify = longhaul_verify, - .target = longhaul_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = longhaul_target, .get = longhaul_get, .init = longhaul_cpu_init, - .exit = longhaul_cpu_exit, .name = "longhaul", - .attr = longhaul_attr, + .attr = cpufreq_generic_attr, }; static const struct x86_cpu_id longhaul_id[] = { @@ -1002,7 +971,15 @@ static void __exit longhaul_exit(void) for (i = 0; i < numscales; i++) { if (mults[i] == maxmult) { + struct cpufreq_freqs freqs; + + freqs.old = policy->cur; + freqs.new = longhaul_table[i].frequency; + freqs.flags = 0; + + cpufreq_freq_transition_begin(policy, &freqs); longhaul_setstate(policy, i); + cpufreq_freq_transition_end(policy, &freqs, 0); break; } } diff --git a/drivers/cpufreq/longrun.c b/drivers/cpufreq/longrun.c index 5aa031612d5..074971b1263 100644 --- a/drivers/cpufreq/longrun.c +++ b/drivers/cpufreq/longrun.c @@ -129,9 +129,7 @@ static int longrun_verify_policy(struct cpufreq_policy *policy) return -EINVAL; policy->cpu = 0; - cpufreq_verify_within_limits(policy, - policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); + cpufreq_verify_within_cpu_limits(policy); if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) && (policy->policy != CPUFREQ_POLICY_PERFORMANCE)) diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index 7bc3c44d34e..d4add862194 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c @@ -24,8 +24,6 @@ static uint nowait; -static struct clk *cpuclk; - static void (*saved_cpu_wait) (void); static int loongson2_cpu_freq_notifier(struct notifier_block *nb, @@ -44,65 +42,34 @@ static int loongson2_cpu_freq_notifier(struct notifier_block *nb, return 0; } -static unsigned int loongson2_cpufreq_get(unsigned int cpu) -{ - return clk_get_rate(cpuclk); -} - /* * Here we notify other drivers of the proposed change and the final change. 
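
That comment describes the old ->target() contract, which this series retires for every table-based driver in the file list: instead of resolving a target frequency to a table index and sending the PRECHANGE/POSTCHANGE notifications itself, a driver now implements ->target_index() and the core does both around the call. The shape of the conversion, with hypothetical names throughout:

    #include <linux/cpufreq.h>

    static struct cpufreq_frequency_table mydrv_table[4];            /* placeholder */
    static void program_hardware(unsigned int index) { }             /* stub */

    /* Old contract: the driver did the lookup and both notifications. */
    static int mydrv_target(struct cpufreq_policy *policy,
                            unsigned int target_freq, unsigned int relation)
    {
            struct cpufreq_freqs freqs;
            unsigned int idx = 0;

            if (cpufreq_frequency_table_target(policy, mydrv_table,
                                               target_freq, relation, &idx))
                    return -EINVAL;

            freqs.old = policy->cur;
            freqs.new = mydrv_table[idx].frequency;
            cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
            program_hardware(idx);
            cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
            return 0;
    }

    /* New contract: the core resolves the index and wraps this call in the
     * notifications, so the driver body shrinks to the switch itself. */
    static int mydrv_target_index(struct cpufreq_policy *policy, unsigned int idx)
    {
            program_hardware(idx);
            return 0;
    }

Alongside this, .verify becomes the stock cpufreq_generic_frequency_table_verify(), which is why the per-driver verify functions disappear throughout the diff.
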
*/ static int loongson2_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) + unsigned int index) { unsigned int cpu = policy->cpu; - unsigned int newstate = 0; cpumask_t cpus_allowed; - struct cpufreq_freqs freqs; unsigned int freq; cpus_allowed = current->cpus_allowed; set_cpus_allowed_ptr(current, cpumask_of(cpu)); - if (cpufreq_frequency_table_target - (policy, &loongson2_clockmod_table[0], target_freq, relation, - &newstate)) - return -EINVAL; - freq = ((cpu_clock_freq / 1000) * - loongson2_clockmod_table[newstate].driver_data) / 8; - if (freq < policy->min || freq > policy->max) - return -EINVAL; - - pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000); - - freqs.old = loongson2_cpufreq_get(cpu); - freqs.new = freq; - freqs.flags = 0; - - if (freqs.new == freqs.old) - return 0; - - /* notifiers */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + loongson2_clockmod_table[index].driver_data) / 8; set_cpus_allowed_ptr(current, &cpus_allowed); /* setting the cpu frequency */ - clk_set_rate(cpuclk, freq); - - /* notifiers */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - pr_debug("cpufreq: set frequency %u kHz\n", freq); + clk_set_rate(policy->clk, freq * 1000); return 0; } static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) { + struct clk *cpuclk; int i; unsigned long rate; int ret; @@ -125,46 +92,30 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) i++) loongson2_clockmod_table[i].frequency = (rate * i) / 8; - ret = clk_set_rate(cpuclk, rate); + ret = clk_set_rate(cpuclk, rate * 1000); if (ret) { clk_put(cpuclk); return ret; } - policy->cur = loongson2_cpufreq_get(policy->cpu); - - cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0], - policy->cpu); - - return cpufreq_frequency_table_cpuinfo(policy, - &loongson2_clockmod_table[0]); -} - -static int loongson2_cpufreq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, - &loongson2_clockmod_table[0]); + policy->clk = cpuclk; + return cpufreq_generic_init(policy, &loongson2_clockmod_table[0], 0); } static int loongson2_cpufreq_exit(struct cpufreq_policy *policy) { - clk_put(cpuclk); + clk_put(policy->clk); return 0; } -static struct freq_attr *loongson2_table_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver loongson2_cpufreq_driver = { .name = "loongson2", .init = loongson2_cpufreq_cpu_init, - .verify = loongson2_cpufreq_verify, - .target = loongson2_cpufreq_target, - .get = loongson2_cpufreq_get, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = loongson2_cpufreq_target, + .get = cpufreq_generic_get, .exit = loongson2_cpufreq_exit, - .attr = loongson2_table_attr, + .attr = cpufreq_generic_attr, }; static struct platform_device_id platform_device_ids[] = { diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c index 6168d77b296..cc3408fc073 100644 --- a/drivers/cpufreq/maple-cpufreq.c +++ b/drivers/cpufreq/maple-cpufreq.c @@ -59,14 +59,9 @@ #define CPUFREQ_LOW 1 static struct cpufreq_frequency_table maple_cpu_freqs[] = { - {CPUFREQ_HIGH, 0}, - {CPUFREQ_LOW, 0}, - {0, CPUFREQ_TABLE_END}, -}; - -static struct freq_attr *maple_cpu_freqs_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, + {0, CPUFREQ_HIGH, 0}, + {0, CPUFREQ_LOW, 0}, + {0, 0, CPUFREQ_TABLE_END}, }; /* Power mode data is an array of the 32 bits PCR values to use for @@ -74,8 +69,6 @@ 
static struct freq_attr *maple_cpu_freqs_attr[] = { */ static int maple_pmode_cur; -static DEFINE_MUTEX(maple_switch_mutex); - static const u32 *maple_pmode_data; static int maple_pmode_max; @@ -135,37 +128,10 @@ static int maple_scom_query_freq(void) * Common interface to the cpufreq core */ -static int maple_cpufreq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, maple_cpu_freqs); -} - static int maple_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) + unsigned int index) { - unsigned int newstate = 0; - struct cpufreq_freqs freqs; - int rc; - - if (cpufreq_frequency_table_target(policy, maple_cpu_freqs, - target_freq, relation, &newstate)) - return -EINVAL; - - if (maple_pmode_cur == newstate) - return 0; - - mutex_lock(&maple_switch_mutex); - - freqs.old = maple_cpu_freqs[maple_pmode_cur].frequency; - freqs.new = maple_cpu_freqs[newstate].frequency; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - rc = maple_scom_switch_freq(newstate); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - mutex_unlock(&maple_switch_mutex); - - return rc; + return maple_scom_switch_freq(index); } static unsigned int maple_cpufreq_get_speed(unsigned int cpu) @@ -175,27 +141,17 @@ static unsigned int maple_cpufreq_get_speed(unsigned int cpu) static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy) { - policy->cpuinfo.transition_latency = 12000; - policy->cur = maple_cpu_freqs[maple_scom_query_freq()].frequency; - /* secondary CPUs are tied to the primary one by the - * cpufreq core if in the secondary policy we tell it that - * it actually must be one policy together with all others. */ - cpumask_setall(policy->cpus); - cpufreq_frequency_table_get_attr(maple_cpu_freqs, policy->cpu); - - return cpufreq_frequency_table_cpuinfo(policy, - maple_cpu_freqs); + return cpufreq_generic_init(policy, maple_cpu_freqs, 12000); } - static struct cpufreq_driver maple_cpufreq_driver = { .name = "maple", .flags = CPUFREQ_CONST_LOOPS, .init = maple_cpufreq_cpu_init, - .verify = maple_cpufreq_verify, - .target = maple_cpufreq_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = maple_cpufreq_target, .get = maple_cpufreq_get_speed, - .attr = maple_cpu_freqs_attr, + .attr = cpufreq_generic_attr, }; static int __init maple_cpufreq_init(void) diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index f31fcfcad51..5f69c9aa703 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -22,7 +22,7 @@ #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> -#include <linux/opp.h> +#include <linux/pm_opp.h> #include <linux/cpu.h> #include <linux/module.h> #include <linux/platform_device.h> @@ -36,65 +36,21 @@ static struct cpufreq_frequency_table *freq_table; static atomic_t freq_table_users = ATOMIC_INIT(0); -static struct clk *mpu_clk; static struct device *mpu_dev; static struct regulator *mpu_reg; -static int omap_verify_speed(struct cpufreq_policy *policy) +static int omap_target(struct cpufreq_policy *policy, unsigned int index) { - if (!freq_table) - return -EINVAL; - return cpufreq_frequency_table_verify(policy, freq_table); -} - -static unsigned int omap_getspeed(unsigned int cpu) -{ - unsigned long rate; - - if (cpu >= NR_CPUS) - return 0; - - rate = clk_get_rate(mpu_clk) / 1000; - return rate; -} - -static int omap_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - 
unsigned int i; - int r, ret = 0; - struct cpufreq_freqs freqs; - struct opp *opp; + int r, ret; + struct dev_pm_opp *opp; unsigned long freq, volt = 0, volt_old = 0, tol = 0; + unsigned int old_freq, new_freq; - if (!freq_table) { - dev_err(mpu_dev, "%s: cpu%d: no freq table!\n", __func__, - policy->cpu); - return -EINVAL; - } - - ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, - relation, &i); - if (ret) { - dev_dbg(mpu_dev, "%s: cpu%d: no freq match for %d(ret=%d)\n", - __func__, policy->cpu, target_freq, ret); - return ret; - } - freqs.new = freq_table[i].frequency; - if (!freqs.new) { - dev_err(mpu_dev, "%s: cpu%d: no match for freq %d\n", __func__, - policy->cpu, target_freq); - return -EINVAL; - } + old_freq = policy->cur; + new_freq = freq_table[index].frequency; - freqs.old = omap_getspeed(policy->cpu); - - if (freqs.old == freqs.new && policy->cur == freqs.new) - return ret; - - freq = freqs.new * 1000; - ret = clk_round_rate(mpu_clk, freq); + freq = new_freq * 1000; + ret = clk_round_rate(policy->clk, freq); if (IS_ERR_VALUE(ret)) { dev_warn(mpu_dev, "CPUfreq: Cannot find matching frequency for %lu\n", @@ -105,143 +61,102 @@ static int omap_target(struct cpufreq_policy *policy, if (mpu_reg) { rcu_read_lock(); - opp = opp_find_freq_ceil(mpu_dev, &freq); + opp = dev_pm_opp_find_freq_ceil(mpu_dev, &freq); if (IS_ERR(opp)) { rcu_read_unlock(); dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n", - __func__, freqs.new); + __func__, new_freq); return -EINVAL; } - volt = opp_get_voltage(opp); + volt = dev_pm_opp_get_voltage(opp); rcu_read_unlock(); tol = volt * OPP_TOLERANCE / 100; volt_old = regulator_get_voltage(mpu_reg); } dev_dbg(mpu_dev, "cpufreq-omap: %u MHz, %ld mV --> %u MHz, %ld mV\n", - freqs.old / 1000, volt_old ? volt_old / 1000 : -1, - freqs.new / 1000, volt ? volt / 1000 : -1); - - /* notifiers */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + old_freq / 1000, volt_old ? volt_old / 1000 : -1, + new_freq / 1000, volt ? volt / 1000 : -1); /* scaling up? scale voltage before frequency */ - if (mpu_reg && (freqs.new > freqs.old)) { + if (mpu_reg && (new_freq > old_freq)) { r = regulator_set_voltage(mpu_reg, volt - tol, volt + tol); if (r < 0) { dev_warn(mpu_dev, "%s: unable to scale voltage up.\n", __func__); - freqs.new = freqs.old; - goto done; + return r; } } - ret = clk_set_rate(mpu_clk, freqs.new * 1000); + ret = clk_set_rate(policy->clk, new_freq * 1000); /* scaling down? 
scale voltage after frequency */ - if (mpu_reg && (freqs.new < freqs.old)) { + if (mpu_reg && (new_freq < old_freq)) { r = regulator_set_voltage(mpu_reg, volt - tol, volt + tol); if (r < 0) { dev_warn(mpu_dev, "%s: unable to scale voltage down.\n", __func__); - ret = clk_set_rate(mpu_clk, freqs.old * 1000); - freqs.new = freqs.old; - goto done; + clk_set_rate(policy->clk, old_freq * 1000); + return r; } } - freqs.new = omap_getspeed(policy->cpu); - -done: - /* notifiers */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - return ret; } static inline void freq_table_free(void) { if (atomic_dec_and_test(&freq_table_users)) - opp_free_cpufreq_table(mpu_dev, &freq_table); + dev_pm_opp_free_cpufreq_table(mpu_dev, &freq_table); } static int omap_cpu_init(struct cpufreq_policy *policy) { - int result = 0; - - mpu_clk = clk_get(NULL, "cpufreq_ck"); - if (IS_ERR(mpu_clk)) - return PTR_ERR(mpu_clk); + int result; - if (policy->cpu >= NR_CPUS) { - result = -EINVAL; - goto fail_ck; - } - - policy->cur = omap_getspeed(policy->cpu); - - if (!freq_table) - result = opp_init_cpufreq_table(mpu_dev, &freq_table); + policy->clk = clk_get(NULL, "cpufreq_ck"); + if (IS_ERR(policy->clk)) + return PTR_ERR(policy->clk); - if (result) { - dev_err(mpu_dev, "%s: cpu%d: failed creating freq table[%d]\n", + if (!freq_table) { + result = dev_pm_opp_init_cpufreq_table(mpu_dev, &freq_table); + if (result) { + dev_err(mpu_dev, + "%s: cpu%d: failed creating freq table[%d]\n", __func__, policy->cpu, result); - goto fail_ck; + goto fail; + } } atomic_inc_return(&freq_table_users); - result = cpufreq_frequency_table_cpuinfo(policy, freq_table); - if (result) - goto fail_table; - - cpufreq_frequency_table_get_attr(freq_table, policy->cpu); - - policy->cur = omap_getspeed(policy->cpu); - - /* - * On OMAP SMP configuartion, both processors share the voltage - * and clock. So both CPUs needs to be scaled together and hence - * needs software co-ordination. Use cpufreq affected_cpus - * interface to handle this scenario. Additional is_smp() check - * is to keep SMP_ON_UP build working. - */ - if (is_smp()) - cpumask_setall(policy->cpus); - /* FIXME: what's the actual transition time? 
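
The FIXME survives the conversion: the 300 us (300 * 1000 ns) estimate is simply carried over as the latency argument to cpufreq_generic_init(), the helper that absorbs the per-driver init boilerplate deleted throughout this diff. What the helper performs is approximately the following (a sketch, not the core's exact implementation):

    #include <linux/cpufreq.h>

    static struct cpufreq_frequency_table mydrv_table[4];   /* placeholder */

    /* Roughly equivalent to cpufreq_generic_init(policy, mydrv_table, 300 * 1000) */
    static int mydrv_cpu_init(struct cpufreq_policy *policy)
    {
            int ret;

            ret = cpufreq_table_validate_and_show(policy, mydrv_table);
            if (ret)
                    return ret;

            policy->cpuinfo.transition_latency = 300 * 1000;        /* ns */

            /* all CPUs share one clock, so one policy covers them all */
            cpumask_setall(policy->cpus);

            return 0;
    }
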
*/ - policy->cpuinfo.transition_latency = 300 * 1000; - - return 0; + result = cpufreq_generic_init(policy, freq_table, 300 * 1000); + if (!result) + return 0; -fail_table: freq_table_free(); -fail_ck: - clk_put(mpu_clk); +fail: + clk_put(policy->clk); return result; } static int omap_cpu_exit(struct cpufreq_policy *policy) { freq_table_free(); - clk_put(mpu_clk); + clk_put(policy->clk); return 0; } -static struct freq_attr *omap_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver omap_driver = { - .flags = CPUFREQ_STICKY, - .verify = omap_verify_speed, - .target = omap_target, - .get = omap_getspeed, + .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = omap_target, + .get = cpufreq_generic_get, .init = omap_cpu_init, .exit = omap_cpu_exit, .name = "omap", - .attr = omap_cpufreq_attr, + .attr = cpufreq_generic_attr, }; static int omap_cpufreq_probe(struct platform_device *pdev) diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c index 2f0a2a65c37..529cfd92158 100644 --- a/drivers/cpufreq/p4-clockmod.c +++ b/drivers/cpufreq/p4-clockmod.c @@ -92,60 +92,34 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate) static struct cpufreq_frequency_table p4clockmod_table[] = { - {DC_RESV, CPUFREQ_ENTRY_INVALID}, - {DC_DFLT, 0}, - {DC_25PT, 0}, - {DC_38PT, 0}, - {DC_50PT, 0}, - {DC_64PT, 0}, - {DC_75PT, 0}, - {DC_88PT, 0}, - {DC_DISABLE, 0}, - {DC_RESV, CPUFREQ_TABLE_END}, + {0, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {0, DC_DFLT, 0}, + {0, DC_25PT, 0}, + {0, DC_38PT, 0}, + {0, DC_50PT, 0}, + {0, DC_64PT, 0}, + {0, DC_75PT, 0}, + {0, DC_88PT, 0}, + {0, DC_DISABLE, 0}, + {0, DC_RESV, CPUFREQ_TABLE_END}, }; -static int cpufreq_p4_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) +static int cpufreq_p4_target(struct cpufreq_policy *policy, unsigned int index) { - unsigned int newstate = DC_RESV; - struct cpufreq_freqs freqs; int i; - if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0], - target_freq, relation, &newstate)) - return -EINVAL; - - freqs.old = cpufreq_p4_get(policy->cpu); - freqs.new = stock_freq * p4clockmod_table[newstate].driver_data / 8; - - if (freqs.new == freqs.old) - return 0; - - /* notifiers */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - /* run on each logical CPU, * see section 13.15.3 of IA32 Intel Architecture Software * Developer's Manual, Volume 3 */ for_each_cpu(i, policy->cpus) - cpufreq_p4_setdc(i, p4clockmod_table[newstate].driver_data); - - /* notifiers */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + cpufreq_p4_setdc(i, p4clockmod_table[index].driver_data); return 0; } -static int cpufreq_p4_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, &p4clockmod_table[0]); -} - - static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) { if (c->x86 == 0x06) { @@ -230,25 +204,17 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) else p4clockmod_table[i].frequency = (stock_freq * i)/8; } - cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu); /* cpuinfo and default policy values */ /* the transition latency is set to be 1 higher than the maximum * transition latency of the ondemand governor */ policy->cpuinfo.transition_latency = 10000001; - policy->cur = stock_freq; - return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]); + return 
cpufreq_table_validate_and_show(policy, &p4clockmod_table[0]); } -static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; -} - static unsigned int cpufreq_p4_get(unsigned int cpu) { u32 l, h; @@ -267,19 +233,13 @@ static unsigned int cpufreq_p4_get(unsigned int cpu) return stock_freq; } -static struct freq_attr *p4clockmod_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver p4clockmod_driver = { - .verify = cpufreq_p4_verify, - .target = cpufreq_p4_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = cpufreq_p4_target, .init = cpufreq_p4_cpu_init, - .exit = cpufreq_p4_cpu_exit, .get = cpufreq_p4_get, .name = "p4-clockmod", - .attr = p4clockmod_attr, + .attr = cpufreq_generic_attr, }; static const struct x86_cpu_id cpufreq_p4_id[] = { diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c index 534e43a60d1..35dd4d7ffee 100644 --- a/drivers/cpufreq/pasemi-cpufreq.c +++ b/drivers/cpufreq/pasemi-cpufreq.c @@ -28,6 +28,7 @@ #include <linux/cpufreq.h> #include <linux/timer.h> #include <linux/module.h> +#include <linux/of_address.h> #include <asm/hw_irq.h> #include <asm/io.h> @@ -51,8 +52,6 @@ static void __iomem *sdcpwr_mapbase; static void __iomem *sdcasr_mapbase; -static DEFINE_MUTEX(pas_switch_mutex); - /* Current astate, is used when waking up from power savings on * one core, in case the other core has switched states during * the idle time. @@ -61,17 +60,12 @@ static int current_astate; /* We support 5(A0-A4) power states excluding turbo(A5-A6) modes */ static struct cpufreq_frequency_table pas_freqs[] = { - {0, 0}, - {1, 0}, - {2, 0}, - {3, 0}, - {4, 0}, - {0, CPUFREQ_TABLE_END}, -}; - -static struct freq_attr *pas_cpu_freqs_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, + {0, 0, 0}, + {0, 1, 0}, + {0, 2, 0}, + {0, 3, 0}, + {0, 4, 0}, + {0, 0, CPUFREQ_TABLE_END}, }; /* @@ -142,9 +136,10 @@ void restore_astate(int cpu) static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy) { + struct cpufreq_frequency_table *pos; const u32 *max_freqp; u32 max_freq; - int i, cur_astate; + int cur_astate; struct resource res; struct device_node *cpu, *dn; int err = -ENODEV; @@ -203,28 +198,18 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy) pr_debug("initializing frequency table\n"); /* initialize frequency table */ - for (i=0; pas_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) { - pas_freqs[i].frequency = - get_astate_freq(pas_freqs[i].driver_data) * 100000; - pr_debug("%d: %d\n", i, pas_freqs[i].frequency); + cpufreq_for_each_entry(pos, pas_freqs) { + pos->frequency = get_astate_freq(pos->driver_data) * 100000; + pr_debug("%d: %d\n", (int)(pos - pas_freqs), pos->frequency); } - policy->cpuinfo.transition_latency = get_gizmo_latency(); - cur_astate = get_cur_astate(policy->cpu); pr_debug("current astate is at %d\n",cur_astate); policy->cur = pas_freqs[cur_astate].frequency; - cpumask_copy(policy->cpus, cpu_online_mask); - ppc_proc_freq = policy->cur * 1000ul; - cpufreq_frequency_table_get_attr(pas_freqs, policy->cpu); - - /* this ensures that policy->cpuinfo_min and policy->cpuinfo_max - * are set correctly - */ - return cpufreq_frequency_table_cpuinfo(policy, pas_freqs); + return cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency()); out_unmap_sdcpwr: iounmap(sdcpwr_mapbase); @@ -249,35 +234,14 @@ static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy) if (sdcpwr_mapbase) 
iounmap(sdcpwr_mapbase); - cpufreq_frequency_table_put_attr(policy->cpu); return 0; } -static int pas_cpufreq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, pas_freqs); -} - static int pas_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) + unsigned int pas_astate_new) { - struct cpufreq_freqs freqs; - int pas_astate_new; int i; - cpufreq_frequency_table_target(policy, - pas_freqs, - target_freq, - relation, - &pas_astate_new); - - freqs.old = policy->cur; - freqs.new = pas_freqs[pas_astate_new].frequency; - - mutex_lock(&pas_switch_mutex); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n", policy->cpu, pas_freqs[pas_astate_new].frequency, @@ -288,10 +252,7 @@ static int pas_cpufreq_target(struct cpufreq_policy *policy, for_each_online_cpu(i) set_astate(i, pas_astate_new); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - mutex_unlock(&pas_switch_mutex); - - ppc_proc_freq = freqs.new * 1000ul; + ppc_proc_freq = pas_freqs[pas_astate_new].frequency * 1000ul; return 0; } @@ -300,9 +261,9 @@ static struct cpufreq_driver pas_cpufreq_driver = { .flags = CPUFREQ_CONST_LOOPS, .init = pas_cpufreq_cpu_init, .exit = pas_cpufreq_cpu_exit, - .verify = pas_cpufreq_verify, - .target = pas_cpufreq_target, - .attr = pas_cpu_freqs_attr, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = pas_cpufreq_target, + .attr = cpufreq_generic_attr, }; /* diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index d81c4e5ea0a..728a2d87949 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c @@ -111,8 +111,7 @@ static struct pcc_cpu __percpu *pcc_cpu_info; static int pcc_cpufreq_verify(struct cpufreq_policy *policy) { - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); + cpufreq_verify_within_cpu_limits(policy); return 0; } @@ -214,8 +213,9 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy, cpu, target_freq, (pcch_virt_addr + pcc_cpu_data->input_offset)); + freqs.old = policy->cur; freqs.new = target_freq; - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + cpufreq_freq_transition_begin(policy, &freqs); input_buffer = 0x1 | (((target_freq * 100) / (ioread32(&pcch_hdr->nominal) * 1000)) << 8); @@ -229,25 +229,20 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy, memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ); status = ioread16(&pcch_hdr->status); + iowrite16(0, &pcch_hdr->status); + + cpufreq_freq_transition_end(policy, &freqs, status != CMD_COMPLETE); + spin_unlock(&pcc_lock); + if (status != CMD_COMPLETE) { pr_debug("target: FAILED for cpu %d, with status: 0x%x\n", cpu, status); - goto cmd_incomplete; + return -EINVAL; } - iowrite16(0, &pcch_hdr->status); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu); - spin_unlock(&pcc_lock); return 0; - -cmd_incomplete: - freqs.new = freqs.old; - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - iowrite16(0, &pcch_hdr->status); - spin_unlock(&pcc_lock); - return -EINVAL; } static int pcc_get_offset(int cpu) @@ -396,15 +391,14 @@ static int __init pcc_cpufreq_probe(void) struct pcc_memory_resource *mem_resource; struct pcc_register_resource *reg_resource; union acpi_object *out_obj, *member; - acpi_handle handle, osc_handle, pcch_handle; 
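
(The pcch_handle dropped just above existed only to probe for the PCCH method; acpi_has_method() now answers that in a single call.) pcc-cpufreq is also one of the few drivers here that keeps a raw ->target(), because its targets are percentages of the nominal frequency rather than table entries, so it adopts the newer cpufreq_freq_transition_begin()/cpufreq_freq_transition_end() pair, which brackets the change and carries success or failure in one place. The pattern, with a placeholder hardware call:

    #include <linux/cpufreq.h>

    static int program_hardware_freq(unsigned int khz) { return 0; }  /* stub */

    static int mydrv_target(struct cpufreq_policy *policy,
                            unsigned int target_freq, unsigned int relation)
    {
            struct cpufreq_freqs freqs;
            int err;

            freqs.old = policy->cur;
            freqs.new = target_freq;
            cpufreq_freq_transition_begin(policy, &freqs);

            err = program_hardware_freq(target_freq);

            /* a non-zero third argument marks the transition as failed */
            cpufreq_freq_transition_end(policy, &freqs, err != 0);

            return err;
    }
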
+ acpi_handle handle, osc_handle; int ret = 0; status = acpi_get_handle(NULL, "\\_SB", &handle); if (ACPI_FAILURE(status)) return -ENODEV; - status = acpi_get_handle(handle, "PCCH", &pcch_handle); - if (ACPI_FAILURE(status)) + if (!acpi_has_method(handle, "PCCH")) return -ENODEV; status = acpi_get_handle(handle, "_OSC", &osc_handle); @@ -560,13 +554,6 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy) ioread32(&pcch_hdr->nominal) * 1000; policy->min = policy->cpuinfo.min_freq = ioread32(&pcch_hdr->minimum_frequency) * 1000; - policy->cur = pcc_get_freq(cpu); - - if (!policy->cur) { - pr_debug("init: Unable to get current CPU frequency\n"); - result = -EINVAL; - goto out; - } pr_debug("init: policy->max is %d, policy->min is %d\n", policy->max, policy->min); diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c index a096cd3fa23..7615180d7ee 100644 --- a/drivers/cpufreq/pmac32-cpufreq.c +++ b/drivers/cpufreq/pmac32-cpufreq.c @@ -81,14 +81,9 @@ static int is_pmu_based; #define CPUFREQ_LOW 1 static struct cpufreq_frequency_table pmac_cpu_freqs[] = { - {CPUFREQ_HIGH, 0}, - {CPUFREQ_LOW, 0}, - {0, CPUFREQ_TABLE_END}, -}; - -static struct freq_attr* pmac_cpu_freqs_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, + {0, CPUFREQ_HIGH, 0}, + {0, CPUFREQ_LOW, 0}, + {0, 0, CPUFREQ_TABLE_END}, }; static inline void local_delay(unsigned long ms) @@ -336,21 +331,11 @@ static int pmu_set_cpu_speed(int low_speed) return 0; } -static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode, - int notify) +static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode) { - struct cpufreq_freqs freqs; unsigned long l3cr; static unsigned long prev_l3cr; - freqs.old = cur_freq; - freqs.new = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq; - - if (freqs.old == freqs.new) - return 0; - - if (notify) - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); if (speed_mode == CPUFREQ_LOW && cpu_has_feature(CPU_FTR_L3CR)) { l3cr = _get_L3CR(); @@ -366,8 +351,6 @@ static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode, if ((prev_l3cr & L3CR_L3E) && l3cr != prev_l3cr) _set_L3CR(prev_l3cr); } - if (notify) - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); cur_freq = (speed_mode == CPUFREQ_HIGH) ? 
hi_freq : low_freq; return 0; @@ -378,23 +361,12 @@ static unsigned int pmac_cpufreq_get_speed(unsigned int cpu) return cur_freq; } -static int pmac_cpufreq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs); -} - static int pmac_cpufreq_target( struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) + unsigned int index) { - unsigned int newstate = 0; int rc; - if (cpufreq_frequency_table_target(policy, pmac_cpu_freqs, - target_freq, relation, &newstate)) - return -EINVAL; - - rc = do_set_cpu_speed(policy, newstate, 1); + rc = do_set_cpu_speed(policy, index); ppc_proc_freq = cur_freq * 1000ul; return rc; @@ -402,14 +374,7 @@ static int pmac_cpufreq_target( struct cpufreq_policy *policy, static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy) { - if (policy->cpu != 0) - return -ENODEV; - - policy->cpuinfo.transition_latency = transition_latency; - policy->cur = cur_freq; - - cpufreq_frequency_table_get_attr(pmac_cpu_freqs, policy->cpu); - return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs); + return cpufreq_generic_init(policy, pmac_cpu_freqs, transition_latency); } static u32 read_gpio(struct device_node *np) @@ -443,7 +408,7 @@ static int pmac_cpufreq_suspend(struct cpufreq_policy *policy) no_schedule = 1; sleep_freq = cur_freq; if (cur_freq == low_freq && !is_pmu_based) - do_set_cpu_speed(policy, CPUFREQ_HIGH, 0); + do_set_cpu_speed(policy, CPUFREQ_HIGH); return 0; } @@ -460,7 +425,7 @@ static int pmac_cpufreq_resume(struct cpufreq_policy *policy) * probably high speed due to our suspend() routine */ do_set_cpu_speed(policy, sleep_freq == low_freq ? - CPUFREQ_LOW : CPUFREQ_HIGH, 0); + CPUFREQ_LOW : CPUFREQ_HIGH); ppc_proc_freq = cur_freq * 1000ul; @@ -469,14 +434,14 @@ static int pmac_cpufreq_resume(struct cpufreq_policy *policy) } static struct cpufreq_driver pmac_cpufreq_driver = { - .verify = pmac_cpufreq_verify, - .target = pmac_cpufreq_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = pmac_cpufreq_target, .get = pmac_cpufreq_get_speed, .init = pmac_cpufreq_cpu_init, .suspend = pmac_cpufreq_suspend, .resume = pmac_cpufreq_resume, .flags = CPUFREQ_PM_NO_WARN, - .attr = pmac_cpu_freqs_attr, + .attr = cpufreq_generic_attr, .name = "powermac", }; diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c index 3a51ad7e47c..8bc422977b5 100644 --- a/drivers/cpufreq/pmac64-cpufreq.c +++ b/drivers/cpufreq/pmac64-cpufreq.c @@ -65,14 +65,9 @@ #define CPUFREQ_LOW 1 static struct cpufreq_frequency_table g5_cpu_freqs[] = { - {CPUFREQ_HIGH, 0}, - {CPUFREQ_LOW, 0}, - {0, CPUFREQ_TABLE_END}, -}; - -static struct freq_attr* g5_cpu_freqs_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, + {0, CPUFREQ_HIGH, 0}, + {0, CPUFREQ_LOW, 0}, + {0, 0, CPUFREQ_TABLE_END}, }; /* Power mode data is an array of the 32 bits PCR values to use for @@ -84,8 +79,6 @@ static void (*g5_switch_volt)(int speed_mode); static int (*g5_switch_freq)(int speed_mode); static int (*g5_query_freq)(void); -static DEFINE_MUTEX(g5_switch_mutex); - static unsigned long transition_latency; #ifdef CONFIG_PMAC_SMU @@ -142,7 +135,7 @@ static void g5_vdnap_switch_volt(int speed_mode) pmf_call_one(pfunc_vdnap0_complete, &args); if (done) break; - msleep(1); + usleep_range(1000, 1000); } if (done == 0) printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n"); @@ -241,7 +234,7 @@ static void g5_pfunc_switch_volt(int speed_mode) if (pfunc_cpu1_volt_low) 
pmf_call_one(pfunc_cpu1_volt_low, NULL); } - msleep(10); /* should be faster , to fix */ + usleep_range(10000, 10000); /* should be faster , to fix */ } /* @@ -286,7 +279,7 @@ static int g5_pfunc_switch_freq(int speed_mode) pmf_call_one(pfunc_slewing_done, &args); if (done) break; - msleep(1); + usleep_range(500, 500); } if (done == 0) printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n"); @@ -317,37 +310,9 @@ static int g5_pfunc_query_freq(void) * Common interface to the cpufreq core */ -static int g5_cpufreq_verify(struct cpufreq_policy *policy) +static int g5_cpufreq_target(struct cpufreq_policy *policy, unsigned int index) { - return cpufreq_frequency_table_verify(policy, g5_cpu_freqs); -} - -static int g5_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) -{ - unsigned int newstate = 0; - struct cpufreq_freqs freqs; - int rc; - - if (cpufreq_frequency_table_target(policy, g5_cpu_freqs, - target_freq, relation, &newstate)) - return -EINVAL; - - if (g5_pmode_cur == newstate) - return 0; - - mutex_lock(&g5_switch_mutex); - - freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency; - freqs.new = g5_cpu_freqs[newstate].frequency; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - rc = g5_switch_freq(newstate); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - mutex_unlock(&g5_switch_mutex); - - return rc; + return g5_switch_freq(index); } static unsigned int g5_cpufreq_get_speed(unsigned int cpu) @@ -357,27 +322,17 @@ static unsigned int g5_cpufreq_get_speed(unsigned int cpu) static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy) { - policy->cpuinfo.transition_latency = transition_latency; - policy->cur = g5_cpu_freqs[g5_query_freq()].frequency; - /* secondary CPUs are tied to the primary one by the - * cpufreq core if in the secondary policy we tell it that - * it actually must be one policy together with all others. */ - cpumask_copy(policy->cpus, cpu_online_mask); - cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu); - - return cpufreq_frequency_table_cpuinfo(policy, - g5_cpu_freqs); + return cpufreq_generic_init(policy, g5_cpu_freqs, transition_latency); } - static struct cpufreq_driver g5_cpufreq_driver = { .name = "powermac", .flags = CPUFREQ_CONST_LOOPS, .init = g5_cpufreq_cpu_init, - .verify = g5_cpufreq_verify, - .target = g5_cpufreq_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = g5_cpufreq_target, .get = g5_cpufreq_get_speed, - .attr = g5_cpu_freqs_attr, + .attr = cpufreq_generic_attr, }; @@ -397,7 +352,8 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpunode) /* Check supported platforms */ if (of_machine_is_compatible("PowerMac8,1") || of_machine_is_compatible("PowerMac8,2") || - of_machine_is_compatible("PowerMac9,1")) + of_machine_is_compatible("PowerMac9,1") || + of_machine_is_compatible("PowerMac12,1")) use_volts_smu = 1; else if (of_machine_is_compatible("PowerMac11,2")) use_volts_vdnap = 1; @@ -647,8 +603,10 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode) g5_cpu_freqs[0].frequency = max_freq; g5_cpu_freqs[1].frequency = min_freq; + /* Based on a measurement on Xserve G5, rounded up. 
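The conversion just applied to both PowerMac drivers is the template for most of this series, so a condensed sketch may help; every name below is illustrative rather than taken from the patch. Two things change at once: frequency-table rows gain a leading flags word, and the old ->target() callback (which had to look up the table, bounds-check the request and post PRECHANGE/POSTCHANGE notifications itself) becomes ->target_index(), where the core does all of that and hands the driver a validated table index.

#include <linux/cpufreq.h>
#include <linux/time.h>

/* table rows are now { flags, driver_data, frequency-in-kHz } */
static struct cpufreq_frequency_table demo_freqs[] = {
        { 0, 0,  500000 },
        { 0, 1, 1000000 },
        { 0, 0, CPUFREQ_TABLE_END },
};

/* the core has already validated 'index' and sent PRECHANGE */
static int demo_target_index(struct cpufreq_policy *policy,
                             unsigned int index)
{
        /* program the hardware for demo_freqs[index].frequency here */
        return 0;       /* nonzero tells the core the switch failed */
}

static int demo_cpu_init(struct cpufreq_policy *policy)
{
        /* validates the table and records the latency (nanoseconds) */
        return cpufreq_generic_init(policy, demo_freqs, 10 * NSEC_PER_MSEC);
}

static struct cpufreq_driver demo_driver = {
        .name           = "demo",
        .init           = demo_cpu_init,
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = demo_target_index,
        .attr           = cpufreq_generic_attr,
};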
*/ + transition_latency = 10 * NSEC_PER_MSEC; + /* Set callbacks */ - transition_latency = CPUFREQ_ETERNAL; g5_switch_volt = g5_pfunc_switch_volt; g5_switch_freq = g5_pfunc_switch_freq; g5_query_freq = g5_pfunc_query_freq; diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c index 85f1c8c25dd..c8012bc8691 100644 --- a/drivers/cpufreq/powernow-k6.c +++ b/drivers/cpufreq/powernow-k6.c @@ -26,165 +26,214 @@ static unsigned int busfreq; /* FSB, in 10 kHz */ static unsigned int max_multiplier; +static unsigned int param_busfreq = 0; +static unsigned int param_max_multiplier = 0; + +module_param_named(max_multiplier, param_max_multiplier, uint, S_IRUGO); +MODULE_PARM_DESC(max_multiplier, "Maximum multiplier (allowed values: 20 30 35 40 45 50 55 60)"); + +module_param_named(bus_frequency, param_busfreq, uint, S_IRUGO); +MODULE_PARM_DESC(bus_frequency, "Bus frequency in kHz"); /* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */ static struct cpufreq_frequency_table clock_ratio[] = { - {45, /* 000 -> 4.5x */ 0}, - {50, /* 001 -> 5.0x */ 0}, - {40, /* 010 -> 4.0x */ 0}, - {55, /* 011 -> 5.5x */ 0}, - {20, /* 100 -> 2.0x */ 0}, - {30, /* 101 -> 3.0x */ 0}, - {60, /* 110 -> 6.0x */ 0}, - {35, /* 111 -> 3.5x */ 0}, - {0, CPUFREQ_TABLE_END} + {0, 60, /* 110 -> 6.0x */ 0}, + {0, 55, /* 011 -> 5.5x */ 0}, + {0, 50, /* 001 -> 5.0x */ 0}, + {0, 45, /* 000 -> 4.5x */ 0}, + {0, 40, /* 010 -> 4.0x */ 0}, + {0, 35, /* 111 -> 3.5x */ 0}, + {0, 30, /* 101 -> 3.0x */ 0}, + {0, 20, /* 100 -> 2.0x */ 0}, + {0, 0, CPUFREQ_TABLE_END} +}; + +static const u8 index_to_register[8] = { 6, 3, 1, 0, 2, 7, 5, 4 }; +static const u8 register_to_index[8] = { 3, 2, 4, 1, 7, 6, 0, 5 }; + +static const struct { + unsigned freq; + unsigned mult; +} usual_frequency_table[] = { + { 400000, 40 }, // 100 * 4 + { 450000, 45 }, // 100 * 4.5 + { 475000, 50 }, // 95 * 5 + { 500000, 50 }, // 100 * 5 + { 506250, 45 }, // 112.5 * 4.5 + { 533500, 55 }, // 97 * 5.5 + { 550000, 55 }, // 100 * 5.5 + { 562500, 50 }, // 112.5 * 5 + { 570000, 60 }, // 95 * 6 + { 600000, 60 }, // 100 * 6 + { 618750, 55 }, // 112.5 * 5.5 + { 660000, 55 }, // 120 * 5.5 + { 675000, 60 }, // 112.5 * 6 + { 720000, 60 }, // 120 * 6 }; +#define FREQ_RANGE 3000 /** * powernow_k6_get_cpu_multiplier - returns the current FSB multiplier * - * Returns the current setting of the frequency multiplier. Core clock + * Returns the current setting of the frequency multiplier. Core clock * speed is frequency of the Front-Side Bus multiplied with this value. */ static int powernow_k6_get_cpu_multiplier(void) { - u64 invalue = 0; + unsigned long invalue = 0; u32 msrval; + local_irq_disable(); + msrval = POWERNOW_IOPORT + 0x1; wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ invalue = inl(POWERNOW_IOPORT + 0x8); msrval = POWERNOW_IOPORT + 0x0; wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ - return clock_ratio[(invalue >> 5)&7].driver_data; -} + local_irq_enable(); + return clock_ratio[register_to_index[(invalue >> 5)&7]].driver_data; +} -/** - * powernow_k6_set_state - set the PowerNow! multiplier - * @best_i: clock_ratio[best_i] is the target multiplier - * - * Tries to change the PowerNow! 
multiplier - */ -static void powernow_k6_set_state(struct cpufreq_policy *policy, - unsigned int best_i) +static void powernow_k6_set_cpu_multiplier(unsigned int best_i) { - unsigned long outvalue = 0, invalue = 0; + unsigned long outvalue, invalue; unsigned long msrval; - struct cpufreq_freqs freqs; - - if (clock_ratio[best_i].driver_data > max_multiplier) { - printk(KERN_ERR PFX "invalid target frequency\n"); - return; - } - - freqs.old = busfreq * powernow_k6_get_cpu_multiplier(); - freqs.new = busfreq * clock_ratio[best_i].driver_data; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + unsigned long cr0; /* we now need to transform best_i to the BVC format, see AMD#23446 */ - outvalue = (1<<12) | (1<<10) | (1<<9) | (best_i<<5); + /* + * The processor doesn't respond to inquiry cycles while changing the + * frequency, so we must disable cache. + */ + local_irq_disable(); + cr0 = read_cr0(); + write_cr0(cr0 | X86_CR0_CD); + wbinvd(); + + outvalue = (1<<12) | (1<<10) | (1<<9) | (index_to_register[best_i]<<5); msrval = POWERNOW_IOPORT + 0x1; wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ invalue = inl(POWERNOW_IOPORT + 0x8); - invalue = invalue & 0xf; + invalue = invalue & 0x1f; outvalue = outvalue | invalue; - outl(outvalue , (POWERNOW_IOPORT + 0x8)); + outl(outvalue, (POWERNOW_IOPORT + 0x8)); msrval = POWERNOW_IOPORT + 0x0; wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - return; -} - - -/** - * powernow_k6_verify - verifies a new CPUfreq policy - * @policy: new policy - * - * Policy must be within lowest and highest possible CPU Frequency, - * and at least one possible state must be within min and max. - */ -static int powernow_k6_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, &clock_ratio[0]); + write_cr0(cr0); + local_irq_enable(); } - /** - * powernow_k6_setpolicy - sets a new CPUFreq policy - * @policy: new policy - * @target_freq: the target frequency - * @relation: how that frequency relates to achieved frequency - * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) + * powernow_k6_target - set the PowerNow! multiplier + * @best_i: clock_ratio[best_i] is the target multiplier * - * sets a new CPUFreq policy + * Tries to change the PowerNow! 
multiplier */ static int powernow_k6_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) + unsigned int best_i) { - unsigned int newstate = 0; - if (cpufreq_frequency_table_target(policy, &clock_ratio[0], - target_freq, relation, &newstate)) + if (clock_ratio[best_i].driver_data > max_multiplier) { + printk(KERN_ERR PFX "invalid target frequency\n"); return -EINVAL; + } - powernow_k6_set_state(policy, newstate); + powernow_k6_set_cpu_multiplier(best_i); return 0; } - static int powernow_k6_cpu_init(struct cpufreq_policy *policy) { + struct cpufreq_frequency_table *pos; unsigned int i, f; - int result; + unsigned khz; if (policy->cpu != 0) return -ENODEV; - /* get frequencies */ - max_multiplier = powernow_k6_get_cpu_multiplier(); - busfreq = cpu_khz / max_multiplier; + max_multiplier = 0; + khz = cpu_khz; + for (i = 0; i < ARRAY_SIZE(usual_frequency_table); i++) { + if (khz >= usual_frequency_table[i].freq - FREQ_RANGE && + khz <= usual_frequency_table[i].freq + FREQ_RANGE) { + khz = usual_frequency_table[i].freq; + max_multiplier = usual_frequency_table[i].mult; + break; + } + } + if (param_max_multiplier) { + cpufreq_for_each_entry(pos, clock_ratio) + if (pos->driver_data == param_max_multiplier) { + max_multiplier = param_max_multiplier; + goto have_max_multiplier; + } + printk(KERN_ERR "powernow-k6: invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n"); + return -EINVAL; + } + + if (!max_multiplier) { + printk(KERN_WARNING "powernow-k6: unknown frequency %u, cannot determine current multiplier\n", khz); + printk(KERN_WARNING "powernow-k6: use module parameters max_multiplier and bus_frequency\n"); + return -EOPNOTSUPP; + } + +have_max_multiplier: + param_max_multiplier = max_multiplier; + + if (param_busfreq) { + if (param_busfreq >= 50000 && param_busfreq <= 150000) { + busfreq = param_busfreq / 10; + goto have_busfreq; + } + printk(KERN_ERR "powernow-k6: invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n"); + return -EINVAL; + } + + busfreq = khz / max_multiplier; +have_busfreq: + param_busfreq = busfreq * 10; /* table init */ - for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) { - f = clock_ratio[i].driver_data; + cpufreq_for_each_entry(pos, clock_ratio) { + f = pos->driver_data; if (f > max_multiplier) - clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID; + pos->frequency = CPUFREQ_ENTRY_INVALID; else - clock_ratio[i].frequency = busfreq * f; + pos->frequency = busfreq * f; } /* cpuinfo and default policy values */ - policy->cpuinfo.transition_latency = 200000; - policy->cur = busfreq * max_multiplier; - - result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio); - if (result) - return result; + policy->cpuinfo.transition_latency = 500000; - cpufreq_frequency_table_get_attr(clock_ratio, policy->cpu); - - return 0; + return cpufreq_table_validate_and_show(policy, clock_ratio); } static int powernow_k6_cpu_exit(struct cpufreq_policy *policy) { unsigned int i; - for (i = 0; i < 8; i++) { - if (i == max_multiplier) - powernow_k6_set_state(policy, i); + + for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) { + if (clock_ratio[i].driver_data == max_multiplier) { + struct cpufreq_freqs freqs; + + freqs.old = policy->cur; + freqs.new = clock_ratio[i].frequency; + freqs.flags = 0; + + cpufreq_freq_transition_begin(policy, &freqs); + powernow_k6_target(policy, i); + cpufreq_freq_transition_end(policy, &freqs, 0); + break; + } } - 
cpufreq_frequency_table_put_attr(policy->cpu); return 0; } @@ -195,19 +244,14 @@ static unsigned int powernow_k6_get(unsigned int cpu) return ret; } -static struct freq_attr *powernow_k6_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver powernow_k6_driver = { - .verify = powernow_k6_verify, - .target = powernow_k6_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = powernow_k6_target, .init = powernow_k6_cpu_init, .exit = powernow_k6_cpu_exit, .get = powernow_k6_get, .name = "powernow-k6", - .attr = powernow_k6_attr, + .attr = cpufreq_generic_attr, }; static const struct x86_cpu_id powernow_k6_ids[] = { diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c index 14ce480be8a..e61e224475a 100644 --- a/drivers/cpufreq/powernow-k7.c +++ b/drivers/cpufreq/powernow-k7.c @@ -248,7 +248,7 @@ static void change_VID(int vid) } -static void change_speed(struct cpufreq_policy *policy, unsigned int index) +static int powernow_target(struct cpufreq_policy *policy, unsigned int index) { u8 fid, vid; struct cpufreq_freqs freqs; @@ -269,8 +269,6 @@ static void change_speed(struct cpufreq_policy *policy, unsigned int index) freqs.new = powernow_table[index].frequency; - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - /* Now do the magic poking into the MSRs. */ if (have_a0 == 1) /* A0 errata 5 */ @@ -290,7 +288,7 @@ static void change_speed(struct cpufreq_policy *policy, unsigned int index) if (have_a0 == 1) local_irq_enable(); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + return 0; } @@ -533,27 +531,6 @@ static int powernow_decode_bios(int maxfid, int startvid) } -static int powernow_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int newstate; - - if (cpufreq_frequency_table_target(policy, powernow_table, target_freq, - relation, &newstate)) - return -EINVAL; - - change_speed(policy, newstate); - - return 0; -} - - -static int powernow_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, powernow_table); -} - /* * We use the fact that the bus frequency is somehow * a multiple of 100000/3 khz, then we compute sgtc according @@ -678,17 +655,11 @@ static int powernow_cpu_init(struct cpufreq_policy *policy) policy->cpuinfo.transition_latency = cpufreq_scale(2000000UL, fsb, latency); - policy->cur = powernow_get(0); - - cpufreq_frequency_table_get_attr(powernow_table, policy->cpu); - - return cpufreq_frequency_table_cpuinfo(policy, powernow_table); + return cpufreq_table_validate_and_show(policy, powernow_table); } static int powernow_cpu_exit(struct cpufreq_policy *policy) { - cpufreq_frequency_table_put_attr(policy->cpu); - #ifdef CONFIG_X86_POWERNOW_K7_ACPI if (acpi_processor_perf) { acpi_processor_unregister_performance(acpi_processor_perf, 0); @@ -701,14 +672,9 @@ static int powernow_cpu_exit(struct cpufreq_policy *policy) return 0; } -static struct freq_attr *powernow_table_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver powernow_driver = { - .verify = powernow_verify, - .target = powernow_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = powernow_target, .get = powernow_get, #ifdef CONFIG_X86_POWERNOW_K7_ACPI .bios_limit = acpi_processor_get_bios_limit, @@ -716,7 +682,7 @@ static struct cpufreq_driver powernow_driver = { .init = powernow_cpu_init, .exit = powernow_cpu_exit, .name = "powernow-k7", - 
.attr = powernow_table_attr, + .attr = cpufreq_generic_attr, }; static int __init powernow_init(void) diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index 2344a9ed17f..f9ce7e4bf0f 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -27,6 +27,8 @@ * power and thermal data sheets, (e.g. 30417.pdf, 30430.pdf, 43375.pdf) */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/smp.h> #include <linux/module.h> @@ -45,7 +47,6 @@ #include <linux/mutex.h> #include <acpi/processor.h> -#define PFX "powernow-k8: " #define VERSION "version 2.20.00" #include "powernow-k8.h" @@ -161,7 +162,7 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid) u32 i = 0; if ((fid & INVALID_FID_MASK) || (data->currvid & INVALID_VID_MASK)) { - printk(KERN_ERR PFX "internal error - overflow on fid write\n"); + pr_err("internal error - overflow on fid write\n"); return 1; } @@ -175,9 +176,7 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid) do { wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION); if (i++ > 100) { - printk(KERN_ERR PFX - "Hardware error - pending bit very stuck - " - "no further pstate changes possible\n"); + pr_err("Hardware error - pending bit very stuck - no further pstate changes possible\n"); return 1; } } while (query_current_values_with_pending_wait(data)); @@ -185,15 +184,13 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid) count_off_irt(data); if (savevid != data->currvid) { - printk(KERN_ERR PFX - "vid change on fid trans, old 0x%x, new 0x%x\n", - savevid, data->currvid); + pr_err("vid change on fid trans, old 0x%x, new 0x%x\n", + savevid, data->currvid); return 1; } if (fid != data->currfid) { - printk(KERN_ERR PFX - "fid trans failed, fid 0x%x, curr 0x%x\n", fid, + pr_err("fid trans failed, fid 0x%x, curr 0x%x\n", fid, data->currfid); return 1; } @@ -209,7 +206,7 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid) int i = 0; if ((data->currfid & INVALID_FID_MASK) || (vid & INVALID_VID_MASK)) { - printk(KERN_ERR PFX "internal error - overflow on vid write\n"); + pr_err("internal error - overflow on vid write\n"); return 1; } @@ -223,23 +220,19 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid) do { wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS); if (i++ > 100) { - printk(KERN_ERR PFX "internal error - pending bit " - "very stuck - no further pstate " - "changes possible\n"); + pr_err("internal error - pending bit very stuck - no further pstate changes possible\n"); return 1; } } while (query_current_values_with_pending_wait(data)); if (savefid != data->currfid) { - printk(KERN_ERR PFX "fid changed on vid trans, old " - "0x%x new 0x%x\n", - savefid, data->currfid); + pr_err("fid changed on vid trans, old 0x%x new 0x%x\n", + savefid, data->currfid); return 1; } if (vid != data->currvid) { - printk(KERN_ERR PFX "vid trans failed, vid 0x%x, " - "curr 0x%x\n", + pr_err("vid trans failed, vid 0x%x, curr 0x%x\n", vid, data->currvid); return 1; } @@ -283,8 +276,7 @@ static int transition_fid_vid(struct powernow_k8_data *data, return 1; if ((reqfid != data->currfid) || (reqvid != data->currvid)) { - printk(KERN_ERR PFX "failed (cpu%d): req 0x%x 0x%x, " - "curr 0x%x 0x%x\n", + pr_err("failed (cpu%d): req 0x%x 0x%x, curr 0x%x 0x%x\n", smp_processor_id(), reqfid, reqvid, data->currfid, data->currvid); return 1; @@ -304,8 +296,7 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 savefid = data->currfid; 
u32 maxvid, lo, rvomult = 1; - pr_debug("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, " - "reqvid 0x%x, rvo 0x%x\n", + pr_debug("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, reqvid 0x%x, rvo 0x%x\n", smp_processor_id(), data->currfid, data->currvid, reqvid, data->rvo); @@ -342,8 +333,7 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, return 1; if (savefid != data->currfid) { - printk(KERN_ERR PFX "ph1 err, currfid changed 0x%x\n", - data->currfid); + pr_err("ph1 err, currfid changed 0x%x\n", data->currfid); return 1; } @@ -360,13 +350,11 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) u32 fid_interval, savevid = data->currvid; if (data->currfid == reqfid) { - printk(KERN_ERR PFX "ph2 null fid transition 0x%x\n", - data->currfid); + pr_err("ph2 null fid transition 0x%x\n", data->currfid); return 0; } - pr_debug("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, " - "reqfid 0x%x\n", + pr_debug("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, reqfid 0x%x\n", smp_processor_id(), data->currfid, data->currvid, reqfid); @@ -409,15 +397,13 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) return 1; if (data->currfid != reqfid) { - printk(KERN_ERR PFX - "ph2: mismatch, failed fid transition, " - "curr 0x%x, req 0x%x\n", + pr_err("ph2: mismatch, failed fid transition, curr 0x%x, req 0x%x\n", data->currfid, reqfid); return 1; } if (savevid != data->currvid) { - printk(KERN_ERR PFX "ph2: vid changed, save 0x%x, curr 0x%x\n", + pr_err("ph2: vid changed, save 0x%x, curr 0x%x\n", savevid, data->currvid); return 1; } @@ -444,17 +430,14 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, return 1; if (savefid != data->currfid) { - printk(KERN_ERR PFX - "ph3: bad fid change, save 0x%x, curr 0x%x\n", - savefid, data->currfid); + pr_err("ph3: bad fid change, save 0x%x, curr 0x%x\n", + savefid, data->currfid); return 1; } if (data->currvid != reqvid) { - printk(KERN_ERR PFX - "ph3: failed vid transition\n, " - "req 0x%x, curr 0x%x", - reqvid, data->currvid); + pr_err("ph3: failed vid transition\n, req 0x%x, curr 0x%x", + reqvid, data->currvid); return 1; } } @@ -498,23 +481,20 @@ static void check_supported_cpu(void *_rc) if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) { if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) || ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) { - printk(KERN_INFO PFX - "Processor cpuid %x not supported\n", eax); + pr_info("Processor cpuid %x not supported\n", eax); return; } eax = cpuid_eax(CPUID_GET_MAX_CAPABILITIES); if (eax < CPUID_FREQ_VOLT_CAPABILITIES) { - printk(KERN_INFO PFX - "No frequency change capabilities detected\n"); + pr_info("No frequency change capabilities detected\n"); return; } cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); if ((edx & P_STATE_TRANSITION_CAPABLE) != P_STATE_TRANSITION_CAPABLE) { - printk(KERN_INFO PFX - "Power state transitions not supported\n"); + pr_info("Power state transitions not supported\n"); return; } *rc = 0; @@ -529,43 +509,39 @@ static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, for (j = 0; j < data->numps; j++) { if (pst[j].vid > LEAST_VID) { - printk(KERN_ERR FW_BUG PFX "vid %d invalid : 0x%x\n", - j, pst[j].vid); + pr_err(FW_BUG "vid %d invalid : 0x%x\n", j, + pst[j].vid); return -EINVAL; } if (pst[j].vid < data->rvo) { /* vid + rvo >= 0 */ - printk(KERN_ERR FW_BUG PFX "0 vid exceeded with pstate" - " %d\n", j); + pr_err(FW_BUG "0 vid exceeded with pstate %d\n", j); return 
-ENODEV; } if (pst[j].vid < maxvid + data->rvo) { /* vid + rvo >= maxvid */ - printk(KERN_ERR FW_BUG PFX "maxvid exceeded with pstate" - " %d\n", j); + pr_err(FW_BUG "maxvid exceeded with pstate %d\n", j); return -ENODEV; } if (pst[j].fid > MAX_FID) { - printk(KERN_ERR FW_BUG PFX "maxfid exceeded with pstate" - " %d\n", j); + pr_err(FW_BUG "maxfid exceeded with pstate %d\n", j); return -ENODEV; } if (j && (pst[j].fid < HI_FID_TABLE_BOTTOM)) { /* Only first fid is allowed to be in "low" range */ - printk(KERN_ERR FW_BUG PFX "two low fids - %d : " - "0x%x\n", j, pst[j].fid); + pr_err(FW_BUG "two low fids - %d : 0x%x\n", j, + pst[j].fid); return -EINVAL; } if (pst[j].fid < lastfid) lastfid = pst[j].fid; } if (lastfid & 1) { - printk(KERN_ERR FW_BUG PFX "lastfid invalid\n"); + pr_err(FW_BUG "lastfid invalid\n"); return -EINVAL; } if (lastfid > LO_FID_TABLE_TOP) - printk(KERN_INFO FW_BUG PFX - "first fid not from lo freq table\n"); + pr_info(FW_BUG "first fid not from lo freq table\n"); return 0; } @@ -582,16 +558,14 @@ static void print_basics(struct powernow_k8_data *data) for (j = 0; j < data->numps; j++) { if (data->powernow_table[j].frequency != CPUFREQ_ENTRY_INVALID) { - printk(KERN_INFO PFX - "fid 0x%x (%d MHz), vid 0x%x\n", - data->powernow_table[j].driver_data & 0xff, - data->powernow_table[j].frequency/1000, - data->powernow_table[j].driver_data >> 8); + pr_info("fid 0x%x (%d MHz), vid 0x%x\n", + data->powernow_table[j].driver_data & 0xff, + data->powernow_table[j].frequency/1000, + data->powernow_table[j].driver_data >> 8); } } if (data->batps) - printk(KERN_INFO PFX "Only %d pstates on battery\n", - data->batps); + pr_info("Only %d pstates on battery\n", data->batps); } static int fill_powernow_table(struct powernow_k8_data *data, @@ -602,31 +576,30 @@ static int fill_powernow_table(struct powernow_k8_data *data, if (data->batps) { /* use ACPI support to get full speed on mains power */ - printk(KERN_WARNING PFX - "Only %d pstates usable (use ACPI driver for full " - "range\n", data->batps); + pr_warn("Only %d pstates usable (use ACPI driver for full range\n", + data->batps); data->numps = data->batps; } for (j = 1; j < data->numps; j++) { if (pst[j-1].fid >= pst[j].fid) { - printk(KERN_ERR PFX "PST out of sequence\n"); + pr_err("PST out of sequence\n"); return -EINVAL; } } if (data->numps < 2) { - printk(KERN_ERR PFX "no p states to transition\n"); + pr_err("no p states to transition\n"); return -ENODEV; } if (check_pst_table(data, pst, maxvid)) return -EINVAL; - powernow_table = kmalloc((sizeof(*powernow_table) + powernow_table = kzalloc((sizeof(*powernow_table) * (data->numps + 1)), GFP_KERNEL); if (!powernow_table) { - printk(KERN_ERR PFX "powernow_table memory alloc failure\n"); + pr_err("powernow_table memory alloc failure\n"); return -ENOMEM; } @@ -681,13 +654,13 @@ static int find_psb_table(struct powernow_k8_data *data) pr_debug("table vers: 0x%x\n", psb->tableversion); if (psb->tableversion != PSB_VERSION_1_4) { - printk(KERN_ERR FW_BUG PFX "PSB table is not v1.4\n"); + pr_err(FW_BUG "PSB table is not v1.4\n"); return -ENODEV; } pr_debug("flags: 0x%x\n", psb->flags1); if (psb->flags1) { - printk(KERN_ERR FW_BUG PFX "unknown flags\n"); + pr_err(FW_BUG "unknown flags\n"); return -ENODEV; } @@ -716,7 +689,7 @@ static int find_psb_table(struct powernow_k8_data *data) cpst = 1; } if (cpst != 1) { - printk(KERN_ERR FW_BUG PFX "numpst must be 1\n"); + pr_err(FW_BUG "numpst must be 1\n"); return -ENODEV; } @@ -742,9 +715,8 @@ static int find_psb_table(struct powernow_k8_data 
*data) * BIOS and Kernel Developer's Guide, which is available on * www.amd.com */ - printk(KERN_ERR FW_BUG PFX "No PSB or ACPI _PSS objects\n"); - printk(KERN_ERR PFX "Make sure that your BIOS is up to date" - " and Cool'N'Quiet support is enabled in BIOS setup\n"); + pr_err(FW_BUG "No PSB or ACPI _PSS objects\n"); + pr_err("Make sure that your BIOS is up to date and Cool'N'Quiet support is enabled in BIOS setup\n"); return -ENODEV; } @@ -793,7 +765,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) } /* fill in data->powernow_table */ - powernow_table = kmalloc((sizeof(*powernow_table) + powernow_table = kzalloc((sizeof(*powernow_table) * (data->acpi_data.state_count + 1)), GFP_KERNEL); if (!powernow_table) { pr_debug("powernow_table memory alloc failure\n"); @@ -810,7 +782,6 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) powernow_table[data->acpi_data.state_count].frequency = CPUFREQ_TABLE_END; - powernow_table[data->acpi_data.state_count].driver_data = 0; data->powernow_table = powernow_table; if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu) @@ -820,8 +791,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) acpi_processor_notify_smm(THIS_MODULE); if (!zalloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) { - printk(KERN_ERR PFX - "unable to alloc powernow_k8_data cpumask\n"); + pr_err("unable to alloc powernow_k8_data cpumask\n"); ret_val = -ENOMEM; goto err_out_mem; } @@ -886,9 +856,8 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, } if (freq != (data->acpi_data.states[i].core_frequency * 1000)) { - printk(KERN_INFO PFX "invalid freq entries " - "%u kHz vs. %u kHz\n", freq, - (unsigned int) + pr_info("invalid freq entries %u kHz vs. %u kHz\n", + freq, (unsigned int) (data->acpi_data.states[i].core_frequency * 1000)); invalidate_entry(powernow_table, i); @@ -917,7 +886,7 @@ static int get_transition_latency(struct powernow_k8_data *data) max_latency = cur_latency; } if (max_latency == 0) { - pr_err(FW_WARN PFX "Invalid zero transition latency\n"); + pr_err(FW_WARN "Invalid zero transition latency\n"); max_latency = 1; } /* value in usecs, needs to be in nanoseconds */ @@ -963,34 +932,26 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, policy = cpufreq_cpu_get(smp_processor_id()); cpufreq_cpu_put(policy); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - + cpufreq_freq_transition_begin(policy, &freqs); res = transition_fid_vid(data, fid, vid); - if (res) - freqs.new = freqs.old; - else - freqs.new = find_khz_freq_from_fid(data->currfid); + cpufreq_freq_transition_end(policy, &freqs, res); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return res; } struct powernowk8_target_arg { struct cpufreq_policy *pol; - unsigned targfreq; - unsigned relation; + unsigned newstate; }; static long powernowk8_target_fn(void *arg) { struct powernowk8_target_arg *pta = arg; struct cpufreq_policy *pol = pta->pol; - unsigned targfreq = pta->targfreq; - unsigned relation = pta->relation; + unsigned newstate = pta->newstate; struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); u32 checkfid; u32 checkvid; - unsigned int newstate; int ret; if (!data) @@ -1000,31 +961,27 @@ static long powernowk8_target_fn(void *arg) checkvid = data->currvid; if (pending_bit_stuck()) { - printk(KERN_ERR PFX "failing targ, change pending bit set\n"); + pr_err("failing targ, change pending bit set\n"); return -EIO; } - pr_debug("targ: cpu %d, %d kHz, 
min %d, max %d, relation %d\n", - pol->cpu, targfreq, pol->min, pol->max, relation); + pr_debug("targ: cpu %d, %d kHz, min %d, max %d\n", + pol->cpu, data->powernow_table[newstate].frequency, pol->min, + pol->max); if (query_current_values_with_pending_wait(data)) return -EIO; pr_debug("targ: curr fid 0x%x, vid 0x%x\n", - data->currfid, data->currvid); + data->currfid, data->currvid); if ((checkvid != data->currvid) || (checkfid != data->currfid)) { - pr_info(PFX - "error - out of sync, fix 0x%x 0x%x, vid 0x%x 0x%x\n", + pr_info("error - out of sync, fix 0x%x 0x%x, vid 0x%x 0x%x\n", checkfid, data->currfid, checkvid, data->currvid); } - if (cpufreq_frequency_table_target(pol, data->powernow_table, - targfreq, relation, &newstate)) - return -EIO; - mutex_lock(&fidvid_mutex); powernow_k8_acpi_pst_values(data, newstate); @@ -1032,7 +989,7 @@ static long powernowk8_target_fn(void *arg) ret = transition_frequency_fidvid(data, newstate); if (ret) { - printk(KERN_ERR PFX "transition frequency failed\n"); + pr_err("transition frequency failed\n"); mutex_unlock(&fidvid_mutex); return 1; } @@ -1044,26 +1001,13 @@ static long powernowk8_target_fn(void *arg) } /* Driver entry point to switch to the target frequency */ -static int powernowk8_target(struct cpufreq_policy *pol, - unsigned targfreq, unsigned relation) +static int powernowk8_target(struct cpufreq_policy *pol, unsigned index) { - struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq, - .relation = relation }; + struct powernowk8_target_arg pta = { .pol = pol, .newstate = index }; return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta); } -/* Driver entry point to verify the policy and range of frequencies */ -static int powernowk8_verify(struct cpufreq_policy *pol) -{ - struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); - - if (!data) - return -EINVAL; - - return cpufreq_frequency_table_verify(pol, data->powernow_table); -} - struct init_on_cpu { struct powernow_k8_data *data; int rc; @@ -1074,7 +1018,7 @@ static void powernowk8_cpu_init_on_cpu(void *_init_on_cpu) struct init_on_cpu *init_on_cpu = _init_on_cpu; if (pending_bit_stuck()) { - printk(KERN_ERR PFX "failing init, change pending bit set\n"); + pr_err("failing init, change pending bit set\n"); init_on_cpu->rc = -ENODEV; return; } @@ -1089,18 +1033,17 @@ static void powernowk8_cpu_init_on_cpu(void *_init_on_cpu) init_on_cpu->rc = 0; } -static const char missing_pss_msg[] = - KERN_ERR - FW_BUG PFX "No compatible ACPI _PSS objects found.\n" - FW_BUG PFX "First, make sure Cool'N'Quiet is enabled in the BIOS.\n" - FW_BUG PFX "If that doesn't help, try upgrading your BIOS.\n"; +#define MISSING_PSS_MSG \ + FW_BUG "No compatible ACPI _PSS objects found.\n" \ + FW_BUG "First, make sure Cool'N'Quiet is enabled in the BIOS.\n" \ + FW_BUG "If that doesn't help, try upgrading your BIOS.\n" /* per CPU init entry point to the driver */ static int powernowk8_cpu_init(struct cpufreq_policy *pol) { struct powernow_k8_data *data; struct init_on_cpu init_on_cpu; - int rc; + int rc, cpu; smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1); if (rc) @@ -1108,7 +1051,7 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol) data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) { - printk(KERN_ERR PFX "unable to alloc powernow_k8_data"); + pr_err("unable to alloc powernow_k8_data"); return -ENOMEM; } @@ -1120,13 +1063,11 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol) * an UP version, and is deprecated by AMD. 
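Alongside the API conversion, powernow-k8 above switches its diagnostics to the kernel's pr_fmt() convention. The mechanics, reduced to a fragment (the message text is a placeholder):

/* pr_fmt() must be defined before the first include; every pr_*()
 * helper then expands with the module prefix already attached, so
 * the old hand-written PFX string becomes redundant.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>

static void demo_report(unsigned int fid)
{
        pr_err("fid trans failed, fid 0x%x\n", fid);
        /* emits: '<module name>: fid trans failed, fid 0x...' */
}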
*/ if (num_online_cpus() != 1) { - printk_once(missing_pss_msg); + pr_err_once(MISSING_PSS_MSG); goto err_out; } if (pol->cpu != 0) { - printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for " - "CPU other than CPU0. Complain to your BIOS " - "vendor.\n"); + pr_err(FW_BUG "No ACPI _PSS objects for CPU other than CPU0. Complain to your BIOS vendor.\n"); goto err_out; } rc = find_psb_table(data); @@ -1152,24 +1093,21 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol) cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu)); data->available_cores = pol->cpus; - pol->cur = find_khz_freq_from_fid(data->currfid); - pr_debug("policy current frequency %d kHz\n", pol->cur); - /* min/max the cpu is capable of */ - if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) { - printk(KERN_ERR FW_BUG PFX "invalid powernow_table\n"); + if (cpufreq_table_validate_and_show(pol, data->powernow_table)) { + pr_err(FW_BUG "invalid powernow_table\n"); powernow_k8_cpu_exit_acpi(data); kfree(data->powernow_table); kfree(data); return -EINVAL; } - cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu); - pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n", - data->currfid, data->currvid); + data->currfid, data->currvid); - per_cpu(powernow_data, pol->cpu) = data; + /* Point all the CPUs in this policy to the same data */ + for_each_cpu(cpu, pol->cpus) + per_cpu(powernow_data, cpu) = data; return 0; @@ -1184,17 +1122,17 @@ err_out: static int powernowk8_cpu_exit(struct cpufreq_policy *pol) { struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); + int cpu; if (!data) return -EINVAL; powernow_k8_cpu_exit_acpi(data); - cpufreq_frequency_table_put_attr(pol->cpu); - kfree(data->powernow_table); kfree(data); - per_cpu(powernow_data, pol->cpu) = NULL; + for_each_cpu(cpu, pol->cpus) + per_cpu(powernow_data, cpu) = NULL; return 0; } @@ -1227,20 +1165,16 @@ out: return khz; } -static struct freq_attr *powernow_k8_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver cpufreq_amd64_driver = { - .verify = powernowk8_verify, - .target = powernowk8_target, + .flags = CPUFREQ_ASYNC_NOTIFICATION, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = powernowk8_target, .bios_limit = acpi_processor_get_bios_limit, .init = powernowk8_cpu_init, .exit = powernowk8_cpu_exit, .get = powernowk8_get, .name = "powernow-k8", - .attr = powernow_k8_attr, + .attr = cpufreq_generic_attr, }; static void __request_acpi_cpufreq(void) @@ -1252,12 +1186,12 @@ static void __request_acpi_cpufreq(void) goto request; if (strncmp(cur_drv, drv, min_t(size_t, strlen(cur_drv), strlen(drv)))) - pr_warn(PFX "WTF driver: %s\n", cur_drv); + pr_warn("WTF driver: %s\n", cur_drv); return; request: - pr_warn(PFX "This CPU is not supported anymore, using acpi-cpufreq instead.\n"); + pr_warn("This CPU is not supported anymore, using acpi-cpufreq instead.\n"); request_module(drv); } @@ -1292,7 +1226,7 @@ static int powernowk8_init(void) if (ret) return ret; - pr_info(PFX "Found %d %s (%d cpu cores) (" VERSION ")\n", + pr_info("Found %d %s (%d cpu cores) (" VERSION ")\n", num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus); return ret; @@ -1306,8 +1240,8 @@ static void __exit powernowk8_exit(void) cpufreq_unregister_driver(&cpufreq_amd64_driver); } -MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and " - "Mark Langsdorf <mark.langsdorf@amd.com>"); +MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com>"); +MODULE_AUTHOR("Mark Langsdorf 
<mark.langsdorf@amd.com>"); MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver."); MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/powernow-k8.h b/drivers/cpufreq/powernow-k8.h index 79329d4d5ab..45ce11e8662 100644 --- a/drivers/cpufreq/powernow-k8.h +++ b/drivers/cpufreq/powernow-k8.h @@ -19,7 +19,7 @@ struct powernow_k8_data { u32 vidmvs; /* usable value calculated from mvs */ u32 vstable; /* voltage stabilization time, units 20 us */ u32 plllock; /* pll lock time, units 1 us */ - u32 exttype; /* extended interface = 1 */ + u32 exttype; /* extended interface = 1 */ /* keep track of the current fid / vid or pstate */ u32 currvid; diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c new file mode 100644 index 00000000000..bb1d08dc8cc --- /dev/null +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -0,0 +1,342 @@ +/* + * POWERNV cpufreq driver for the IBM POWER processors + * + * (C) Copyright IBM 2014 + * + * Author: Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) "powernv-cpufreq: " fmt + +#include <linux/kernel.h> +#include <linux/sysfs.h> +#include <linux/cpumask.h> +#include <linux/module.h> +#include <linux/cpufreq.h> +#include <linux/smp.h> +#include <linux/of.h> + +#include <asm/cputhreads.h> +#include <asm/reg.h> +#include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */ + +#define POWERNV_MAX_PSTATES 256 + +static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1]; + +/* + * Note: The set of pstates consists of contiguous integers, the + * smallest of which is indicated by powernv_pstate_info.min, the + * largest of which is indicated by powernv_pstate_info.max. + * + * The nominal pstate is the highest non-turbo pstate in this + * platform. This is indicated by powernv_pstate_info.nominal. 
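Because the pstate ids described above are signed and contiguous, the driver can index its frequency table by simple subtraction. A worked example with assumed values (a real machine reports its own min/max/nominal through the device tree):

/* assume ibm,pstate-max = 0 and ibm,pstate-min = -47: 48 pstates,
 * filled into the table fastest-first, so row i holds pstate
 * (max - i) and the row for pstate p is (max - p) -- the same
 * arithmetic pstate_id_to_freq() uses below.
 */
static int demo_pstate_to_row(int pstate_id)
{
        const int pstate_max = 0, pstate_min = -47;

        if (pstate_id > pstate_max || pstate_id < pstate_min)
                return -1;                      /* out of range */

        return pstate_max - pstate_id;          /* 0 -> row 0, -47 -> row 47 */
}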
+ */ +static struct powernv_pstate_info { + int min; + int max; + int nominal; + int nr_pstates; +} powernv_pstate_info; + +/* + * Initialize the freq table based on data obtained + * from the firmware passed via device-tree + */ +static int init_powernv_pstates(void) +{ + struct device_node *power_mgt; + int i, pstate_min, pstate_max, pstate_nominal, nr_pstates = 0; + const __be32 *pstate_ids, *pstate_freqs; + u32 len_ids, len_freqs; + + power_mgt = of_find_node_by_path("/ibm,opal/power-mgt"); + if (!power_mgt) { + pr_warn("power-mgt node not found\n"); + return -ENODEV; + } + + if (of_property_read_u32(power_mgt, "ibm,pstate-min", &pstate_min)) { + pr_warn("ibm,pstate-min node not found\n"); + return -ENODEV; + } + + if (of_property_read_u32(power_mgt, "ibm,pstate-max", &pstate_max)) { + pr_warn("ibm,pstate-max node not found\n"); + return -ENODEV; + } + + if (of_property_read_u32(power_mgt, "ibm,pstate-nominal", + &pstate_nominal)) { + pr_warn("ibm,pstate-nominal not found\n"); + return -ENODEV; + } + pr_info("cpufreq pstate min %d nominal %d max %d\n", pstate_min, + pstate_nominal, pstate_max); + + pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids); + if (!pstate_ids) { + pr_warn("ibm,pstate-ids not found\n"); + return -ENODEV; + } + + pstate_freqs = of_get_property(power_mgt, "ibm,pstate-frequencies-mhz", + &len_freqs); + if (!pstate_freqs) { + pr_warn("ibm,pstate-frequencies-mhz not found\n"); + return -ENODEV; + } + + WARN_ON(len_ids != len_freqs); + nr_pstates = min(len_ids, len_freqs) / sizeof(u32); + if (!nr_pstates) { + pr_warn("No PStates found\n"); + return -ENODEV; + } + + pr_debug("NR PStates %d\n", nr_pstates); + for (i = 0; i < nr_pstates; i++) { + u32 id = be32_to_cpu(pstate_ids[i]); + u32 freq = be32_to_cpu(pstate_freqs[i]); + + pr_debug("PState id %d freq %d MHz\n", id, freq); + powernv_freqs[i].frequency = freq * 1000; /* kHz */ + powernv_freqs[i].driver_data = id; + } + /* End of list marker entry */ + powernv_freqs[i].frequency = CPUFREQ_TABLE_END; + + powernv_pstate_info.min = pstate_min; + powernv_pstate_info.max = pstate_max; + powernv_pstate_info.nominal = pstate_nominal; + powernv_pstate_info.nr_pstates = nr_pstates; + + return 0; +} + +/* Returns the CPU frequency corresponding to the pstate_id. 
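init_powernv_pstates() above pulls two parallel be32 arrays out of the device tree; the access pattern, stripped to its essentials (the property name is the driver's, everything else is illustrative):

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/printk.h>

static int demo_count_pstates(struct device_node *power_mgt)
{
        const __be32 *ids;
        int len, n, i;

        /* raw big-endian cells plus their byte length come straight back */
        ids = of_get_property(power_mgt, "ibm,pstate-ids", &len);
        if (!ids)
                return -ENODEV;

        n = len / sizeof(u32);
        for (i = 0; i < n; i++)
                pr_debug("pstate id %d\n", be32_to_cpu(ids[i]));

        return n;
}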
*/ +static unsigned int pstate_id_to_freq(int pstate_id) +{ + int i; + + i = powernv_pstate_info.max - pstate_id; + BUG_ON(i >= powernv_pstate_info.nr_pstates || i < 0); + + return powernv_freqs[i].frequency; +} + +/* + * cpuinfo_nominal_freq_show - Show the nominal CPU frequency as indicated by + * the firmware + */ +static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy, + char *buf) +{ + return sprintf(buf, "%u\n", + pstate_id_to_freq(powernv_pstate_info.nominal)); +} + +struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq = + __ATTR_RO(cpuinfo_nominal_freq); + +static struct freq_attr *powernv_cpu_freq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + &cpufreq_freq_attr_cpuinfo_nominal_freq, + NULL, +}; + +/* Helper routines */ + +/* Access helpers to power mgt SPR */ + +static inline unsigned long get_pmspr(unsigned long sprn) +{ + switch (sprn) { + case SPRN_PMCR: + return mfspr(SPRN_PMCR); + + case SPRN_PMICR: + return mfspr(SPRN_PMICR); + + case SPRN_PMSR: + return mfspr(SPRN_PMSR); + } + BUG(); +} + +static inline void set_pmspr(unsigned long sprn, unsigned long val) +{ + switch (sprn) { + case SPRN_PMCR: + mtspr(SPRN_PMCR, val); + return; + + case SPRN_PMICR: + mtspr(SPRN_PMICR, val); + return; + } + BUG(); +} + +/* + * Use objects of this type to query/update + * pstates on a remote CPU via smp_call_function. + */ +struct powernv_smp_call_data { + unsigned int freq; + int pstate_id; +}; + +/* + * powernv_read_cpu_freq: Reads the current frequency on this CPU. + * + * Called via smp_call_function. + * + * Note: The caller of the smp_call_function should pass an argument of + * the type 'struct powernv_smp_call_data *' along with this function. + * + * The current frequency on this CPU will be returned via + * ((struct powernv_smp_call_data *)arg)->freq; + */ +static void powernv_read_cpu_freq(void *arg) +{ + unsigned long pmspr_val; + s8 local_pstate_id; + struct powernv_smp_call_data *freq_data = arg; + + pmspr_val = get_pmspr(SPRN_PMSR); + + /* + * The local pstate id corresponds bits 48..55 in the PMSR. + * Note: Watch out for the sign! + */ + local_pstate_id = (pmspr_val >> 48) & 0xFF; + freq_data->pstate_id = local_pstate_id; + freq_data->freq = pstate_id_to_freq(freq_data->pstate_id); + + pr_debug("cpu %d pmsr %016lX pstate_id %d frequency %d kHz\n", + raw_smp_processor_id(), pmspr_val, freq_data->pstate_id, + freq_data->freq); +} + +/* + * powernv_cpufreq_get: Returns the CPU frequency as reported by the + * firmware for CPU 'cpu'. This value is reported through the sysfs + * file cpuinfo_cur_freq. + */ +static unsigned int powernv_cpufreq_get(unsigned int cpu) +{ + struct powernv_smp_call_data freq_data; + + smp_call_function_any(cpu_sibling_mask(cpu), powernv_read_cpu_freq, + &freq_data, 1); + + return freq_data.freq; +} + +/* + * set_pstate: Sets the pstate on this CPU. + * + * This is called via an smp_call_function. + * + * The caller must ensure that freq_data is of the type + * (struct powernv_smp_call_data *) and the pstate_id which needs to be set + * on this CPU should be present in freq_data->pstate_id. 
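One subtlety worth flagging in powernv_read_cpu_freq() above: pstate ids on this platform can be negative, and the s8 local variable is what makes the 8-bit PMSR field come out with the right sign. In isolation:

#include <linux/types.h>

static int demo_extract_pstate(unsigned long pmsr_val)
{
        /* bits 48..55 carry the local pstate id as a signed byte;
         * the narrowing assignment sign-extends it */
        s8 id = (pmsr_val >> 48) & 0xFF;

        return id;      /* raw 0xD1 becomes -47, not 209 */
}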
+ */ +static void set_pstate(void *freq_data) +{ + unsigned long val; + unsigned long pstate_ul = + ((struct powernv_smp_call_data *) freq_data)->pstate_id; + + val = get_pmspr(SPRN_PMCR); + val = val & 0x0000FFFFFFFFFFFFULL; + + pstate_ul = pstate_ul & 0xFF; + + /* Set both global(bits 56..63) and local(bits 48..55) PStates */ + val = val | (pstate_ul << 56) | (pstate_ul << 48); + + pr_debug("Setting cpu %d pmcr to %016lX\n", + raw_smp_processor_id(), val); + set_pmspr(SPRN_PMCR, val); +} + +/* + * powernv_cpufreq_target_index: Sets the frequency corresponding to + * the cpufreq table entry indexed by new_index on the cpus in the + * mask policy->cpus + */ +static int powernv_cpufreq_target_index(struct cpufreq_policy *policy, + unsigned int new_index) +{ + struct powernv_smp_call_data freq_data; + + freq_data.pstate_id = powernv_freqs[new_index].driver_data; + + /* + * Use smp_call_function to send IPI and execute the + * mtspr on target CPU. We could do that without IPI + * if current CPU is within policy->cpus (core) + */ + smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1); + + return 0; +} + +static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + int base, i; + + base = cpu_first_thread_sibling(policy->cpu); + + for (i = 0; i < threads_per_core; i++) + cpumask_set_cpu(base + i, policy->cpus); + + return cpufreq_table_validate_and_show(policy, powernv_freqs); +} + +static struct cpufreq_driver powernv_cpufreq_driver = { + .name = "powernv-cpufreq", + .flags = CPUFREQ_CONST_LOOPS, + .init = powernv_cpufreq_cpu_init, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = powernv_cpufreq_target_index, + .get = powernv_cpufreq_get, + .attr = powernv_cpu_freq_attr, +}; + +static int __init powernv_cpufreq_init(void) +{ + int rc = 0; + + /* Discover pstates from device tree and init */ + rc = init_powernv_pstates(); + if (rc) { + pr_info("powernv-cpufreq disabled. 
System does not support PState control\n"); + return rc; + } + + return cpufreq_register_driver(&powernv_cpufreq_driver); +} +module_init(powernv_cpufreq_init); + +static void __exit powernv_cpufreq_exit(void) +{ + cpufreq_unregister_driver(&powernv_cpufreq_driver); +} +module_exit(powernv_cpufreq_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>"); diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c index 60e81d524ea..3607070797a 100644 --- a/drivers/cpufreq/ppc-corenet-cpufreq.c +++ b/drivers/cpufreq/ppc-corenet-cpufreq.c @@ -13,7 +13,6 @@ #include <linux/clk.h> #include <linux/cpufreq.h> #include <linux/errno.h> -#include <sysdev/fsl_soc.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> @@ -21,15 +20,14 @@ #include <linux/of.h> #include <linux/slab.h> #include <linux/smp.h> +#include <sysdev/fsl_soc.h> /** * struct cpu_data - per CPU data struct - * @clk: the clk of CPU * @parent: the parent node of cpu clock * @table: frequency table */ struct cpu_data { - struct clk *clk; struct device_node *parent; struct cpufreq_frequency_table *table; }; @@ -69,8 +67,6 @@ static const struct soc_data sdata[] = { static u32 min_cpufreq; static const u32 *fmask; -/* serialize frequency changes */ -static DEFINE_MUTEX(cpufreq_lock); static DEFINE_PER_CPU(struct cpu_data *, cpu_data); /* cpumask in a cluster */ @@ -83,13 +79,6 @@ static inline const struct cpumask *cpu_core_mask(int cpu) } #endif -static unsigned int corenet_cpufreq_get_speed(unsigned int cpu) -{ - struct cpu_data *data = per_cpu(cpu_data, cpu); - - return clk_get_rate(data->clk) / 1000; -} - /* reduce the duplicated frequencies in frequency table */ static void freq_table_redup(struct cpufreq_frequency_table *freq_table, int count) @@ -149,6 +138,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy) struct cpufreq_frequency_table *table; struct cpu_data *data; unsigned int cpu = policy->cpu; + u64 u64temp; np = of_get_cpu_node(cpu, NULL); if (!np) @@ -160,8 +150,8 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy) goto err_np; } - data->clk = of_clk_get(np, 0); - if (IS_ERR(data->clk)) { + policy->clk = of_clk_get(np, 0); + if (IS_ERR(policy->clk)) { pr_err("%s: no clock information\n", __func__); goto err_nomem2; } @@ -202,7 +192,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy) table[i].frequency = CPUFREQ_TABLE_END; /* set the min and max frequency properly */ - ret = cpufreq_frequency_table_cpuinfo(policy, table); + ret = cpufreq_table_validate_and_show(policy, table); if (ret) { pr_err("invalid frequency table: %d\n", ret); goto err_nomem1; @@ -216,10 +206,11 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy) for_each_cpu(i, per_cpu(cpu_mask, cpu)) per_cpu(cpu_data, i) = data; - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; - policy->cur = corenet_cpufreq_get_speed(policy->cpu); + /* Minimum transition latency is 12 platform clocks */ + u64temp = 12ULL * NSEC_PER_SEC; + do_div(u64temp, fsl_get_sys_freq()); + policy->cpuinfo.transition_latency = u64temp + 1; - cpufreq_frequency_table_get_attr(table, cpu); of_node_put(np); return 0; @@ -242,7 +233,6 @@ static int __exit corenet_cpufreq_cpu_exit(struct cpufreq_policy *policy) struct cpu_data *data = per_cpu(cpu_data, policy->cpu); unsigned int cpu; - cpufreq_frequency_table_put_attr(policy->cpu); of_node_put(data->parent); kfree(data->table); kfree(data); @@ -253,60 +243,25 @@ 
static int __exit corenet_cpufreq_cpu_exit(struct cpufreq_policy *policy) return 0; } -static int corenet_cpufreq_verify(struct cpufreq_policy *policy) -{ - struct cpufreq_frequency_table *table = - per_cpu(cpu_data, policy->cpu)->table; - - return cpufreq_frequency_table_verify(policy, table); -} - static int corenet_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) + unsigned int index) { - struct cpufreq_freqs freqs; - unsigned int new; struct clk *parent; - int ret; struct cpu_data *data = per_cpu(cpu_data, policy->cpu); - cpufreq_frequency_table_target(policy, data->table, - target_freq, relation, &new); - - if (policy->cur == data->table[new].frequency) - return 0; - - freqs.old = policy->cur; - freqs.new = data->table[new].frequency; - - mutex_lock(&cpufreq_lock); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - - parent = of_clk_get(data->parent, data->table[new].driver_data); - ret = clk_set_parent(data->clk, parent); - if (ret) - freqs.new = freqs.old; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - mutex_unlock(&cpufreq_lock); - - return ret; + parent = of_clk_get(data->parent, data->table[index].driver_data); + return clk_set_parent(policy->clk, parent); } -static struct freq_attr *corenet_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver ppc_corenet_cpufreq_driver = { .name = "ppc_cpufreq", .flags = CPUFREQ_CONST_LOOPS, .init = corenet_cpufreq_cpu_init, .exit = __exit_p(corenet_cpufreq_cpu_exit), - .verify = corenet_cpufreq_verify, - .target = corenet_cpufreq_target, - .get = corenet_cpufreq_get_speed, - .attr = corenet_cpufreq_attr, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = corenet_cpufreq_target, + .get = cpufreq_generic_get, + .attr = cpufreq_generic_attr, }; static const struct of_device_id node_matches[] __initdata = { diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c index 2e448f0bbdc..5a4c5a639f6 100644 --- a/drivers/cpufreq/ppc_cbe_cpufreq.c +++ b/drivers/cpufreq/ppc_cbe_cpufreq.c @@ -30,20 +30,17 @@ #include "ppc_cbe_cpufreq.h" -static DEFINE_MUTEX(cbe_switch_mutex); - - /* the CBE supports an 8 step frequency scaling */ static struct cpufreq_frequency_table cbe_freqs[] = { - {1, 0}, - {2, 0}, - {3, 0}, - {4, 0}, - {5, 0}, - {6, 0}, - {8, 0}, - {10, 0}, - {0, CPUFREQ_TABLE_END}, + {0, 1, 0}, + {0, 2, 0}, + {0, 3, 0}, + {0, 4, 0}, + {0, 5, 0}, + {0, 6, 0}, + {0, 8, 0}, + {0, 10, 0}, + {0, 0, CPUFREQ_TABLE_END}, }; /* @@ -70,9 +67,10 @@ static int set_pmode(unsigned int cpu, unsigned int slow_mode) static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy) { + struct cpufreq_frequency_table *pos; const u32 *max_freqp; u32 max_freq; - int i, cur_pmode; + int cur_pmode; struct device_node *cpu; cpu = of_get_cpu_node(policy->cpu, NULL); @@ -105,9 +103,9 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy) pr_debug("initializing frequency table\n"); /* initialize frequency table */ - for (i=0; cbe_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) { - cbe_freqs[i].frequency = max_freq / cbe_freqs[i].driver_data; - pr_debug("%d: %d\n", i, cbe_freqs[i].frequency); + cpufreq_for_each_entry(pos, cbe_freqs) { + pos->frequency = max_freq / pos->driver_data; + pr_debug("%d: %d\n", (int)(pos - cbe_freqs), pos->frequency); } /* if DEBUG is enabled set_pmode() measures the latency @@ -123,63 +121,27 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy) 
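The corenet hunks above show the new policy->clk plumbing at its cleanest: store the CPU clock in the policy and the core-provided cpufreq_generic_get() replaces the driver's own ->get(). They also retire another CPUFREQ_ETERNAL in favour of a computed floor of 12 platform clocks (at a 600 MHz platform clock that works out to 20 ns, stored as 21 by the +1 rounding). A sketch with an assumed clock name:

#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/err.h>

static int demo_init(struct cpufreq_policy *policy)
{
        policy->clk = clk_get(NULL, "cpu-clk");  /* assumed clock lookup */
        if (IS_ERR(policy->clk))
                return PTR_ERR(policy->clk);

        /* from here on, cpufreq_generic_get(cpu) reports
         * clk_get_rate(policy->clk) / 1000 for this policy's CPUs */
        return 0;
}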
cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); #endif - cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu); - /* this ensures that policy->cpuinfo_min * and policy->cpuinfo_max are set correctly */ - return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs); -} - -static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; -} - -static int cbe_cpufreq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, cbe_freqs); + return cpufreq_table_validate_and_show(policy, cbe_freqs); } static int cbe_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) + unsigned int cbe_pmode_new) { - int rc; - struct cpufreq_freqs freqs; - unsigned int cbe_pmode_new; - - cpufreq_frequency_table_target(policy, - cbe_freqs, - target_freq, - relation, - &cbe_pmode_new); - - freqs.old = policy->cur; - freqs.new = cbe_freqs[cbe_pmode_new].frequency; - - mutex_lock(&cbe_switch_mutex); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - pr_debug("setting frequency for cpu %d to %d kHz, " \ "1/%d of max frequency\n", policy->cpu, cbe_freqs[cbe_pmode_new].frequency, cbe_freqs[cbe_pmode_new].driver_data); - rc = set_pmode(policy->cpu, cbe_pmode_new); - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - mutex_unlock(&cbe_switch_mutex); - - return rc; + return set_pmode(policy->cpu, cbe_pmode_new); } static struct cpufreq_driver cbe_cpufreq_driver = { - .verify = cbe_cpufreq_verify, - .target = cbe_cpufreq_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = cbe_cpufreq_target, .init = cbe_cpufreq_cpu_init, - .exit = cbe_cpufreq_cpu_exit, .name = "cbe-cpufreq", .flags = CPUFREQ_CONST_LOOPS, }; diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c index 8749eaf1879..e24269ab4e9 100644 --- a/drivers/cpufreq/pxa2xx-cpufreq.c +++ b/drivers/cpufreq/pxa2xx-cpufreq.c @@ -262,36 +262,15 @@ static u32 mdrefr_dri(unsigned int freq) return (interval - (cpu_is_pxa27x() ? 
31 : 0)) / 32; } -/* find a valid frequency point */ -static int pxa_verify_policy(struct cpufreq_policy *policy) -{ - struct cpufreq_frequency_table *pxa_freqs_table; - pxa_freqs_t *pxa_freqs; - int ret; - - find_freq_tables(&pxa_freqs_table, &pxa_freqs); - ret = cpufreq_frequency_table_verify(policy, pxa_freqs_table); - - if (freq_debug) - pr_debug("Verified CPU policy: %dKhz min to %dKhz max\n", - policy->min, policy->max); - - return ret; -} - static unsigned int pxa_cpufreq_get(unsigned int cpu) { return get_clk_frequency_khz(0); } -static int pxa_set_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) +static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx) { struct cpufreq_frequency_table *pxa_freqs_table; pxa_freqs_t *pxa_freq_settings; - struct cpufreq_freqs freqs; - unsigned int idx; unsigned long flags; unsigned int new_freq_cpu, new_freq_mem; unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg; @@ -300,32 +279,19 @@ static int pxa_set_target(struct cpufreq_policy *policy, /* Get the current policy */ find_freq_tables(&pxa_freqs_table, &pxa_freq_settings); - /* Lookup the next frequency */ - if (cpufreq_frequency_table_target(policy, pxa_freqs_table, - target_freq, relation, &idx)) { - return -EINVAL; - } - new_freq_cpu = pxa_freq_settings[idx].khz; new_freq_mem = pxa_freq_settings[idx].membus; - freqs.old = policy->cur; - freqs.new = new_freq_cpu; if (freq_debug) pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n", - freqs.new / 1000, (pxa_freq_settings[idx].div2) ? + new_freq_cpu / 1000, (pxa_freq_settings[idx].div2) ? (new_freq_mem / 2000) : (new_freq_mem / 1000)); - if (vcc_core && freqs.new > freqs.old) + if (vcc_core && new_freq_cpu > policy->cur) { ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]); - if (ret) - return ret; - /* - * Tell everyone what we're about to do... - * you should add a notify client with any platform specific - * Vcc changing capability - */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + if (ret) + return ret; + } /* Calculate the next MDREFR. If we're slowing down the SDRAM clock * we need to preset the smaller DRI before the change. If we're @@ -376,13 +342,6 @@ static int pxa_set_target(struct cpufreq_policy *policy, local_irq_restore(flags); /* - * Tell everyone what we've just done... - * you should add a notify client with any platform specific - * SDRAM refresh timer adjustments - */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - /* * Even if voltage setting fails, we don't report it, as the frequency * change succeeded. The voltage reduction is not a critical failure, * only power savings will suffer from this. @@ -391,7 +350,7 @@ static int pxa_set_target(struct cpufreq_policy *policy, * bug is triggered (seems a deadlock). Should anybody find out where, * the "return 0" should become a "return ret". 
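Even with the notifier calls gone, pxa_set_target() above preserves the ordering the hardware cares about: raise Vcc before clocking up, lower it only after clocking down, and treat a failed reduction as lost power savings rather than an error. Schematically, with hypothetical helpers:

#include <linux/errno.h>

/* stand-ins for pxa_cpufreq_change_voltage() and the clock switch */
static int demo_set_vcc(unsigned int khz) { return 0; }
static void demo_set_clocks(unsigned int khz) { }

static int demo_transition(unsigned int cur_khz, unsigned int new_khz)
{
        if (new_khz > cur_khz && demo_set_vcc(new_khz))
                return -EIO;            /* must not run fast at low Vcc */

        demo_set_clocks(new_khz);

        if (new_khz < cur_khz)
                demo_set_vcc(new_khz);  /* failure here only costs power */

        return 0;
}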
*/ - if (vcc_core && freqs.new < freqs.old) + if (vcc_core && new_freq_cpu < policy->cur) ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]); return 0; @@ -414,8 +373,6 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy) /* set default policy and cpuinfo */ policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */ - policy->cur = get_clk_frequency_khz(0); /* current freq */ - policy->min = policy->max = policy->cur; /* Generate pxa25x the run cpufreq_frequency_table struct */ for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) { @@ -453,10 +410,12 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy) find_freq_tables(&pxa255_freq_table, &pxa255_freqs); pr_info("PXA255 cpufreq using %s frequency table\n", pxa255_turbo_table ? "turbo" : "run"); - cpufreq_frequency_table_cpuinfo(policy, pxa255_freq_table); + + cpufreq_table_validate_and_show(policy, pxa255_freq_table); + } + else if (cpu_is_pxa27x()) { + cpufreq_table_validate_and_show(policy, pxa27x_freq_table); } - else if (cpu_is_pxa27x()) - cpufreq_frequency_table_cpuinfo(policy, pxa27x_freq_table); printk(KERN_INFO "PXA CPU frequency change support initialized\n"); @@ -464,8 +423,9 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy) } static struct cpufreq_driver pxa_cpufreq_driver = { - .verify = pxa_verify_policy, - .target = pxa_set_target, + .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = pxa_set_target, .init = pxa_cpufreq_init, .get = pxa_cpufreq_get, .name = "PXA2xx", diff --git a/drivers/cpufreq/pxa3xx-cpufreq.c b/drivers/cpufreq/pxa3xx-cpufreq.c index d26306fb00d..a0127590038 100644 --- a/drivers/cpufreq/pxa3xx-cpufreq.c +++ b/drivers/cpufreq/pxa3xx-cpufreq.c @@ -108,7 +108,7 @@ static int setup_freqs_table(struct cpufreq_policy *policy, pxa3xx_freqs_num = num; pxa3xx_freqs_table = table; - return cpufreq_frequency_table_cpuinfo(policy, table); + return cpufreq_table_validate_and_show(policy, table); } static void __update_core_freq(struct pxa3xx_freq_info *info) @@ -150,54 +150,26 @@ static void __update_bus_freq(struct pxa3xx_freq_info *info) cpu_relax(); } -static int pxa3xx_cpufreq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, pxa3xx_freqs_table); -} - static unsigned int pxa3xx_cpufreq_get(unsigned int cpu) { return pxa3xx_get_clk_frequency_khz(0); } -static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) +static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, unsigned int index) { struct pxa3xx_freq_info *next; - struct cpufreq_freqs freqs; unsigned long flags; - int idx; if (policy->cpu != 0) return -EINVAL; - /* Lookup the next frequency */ - if (cpufreq_frequency_table_target(policy, pxa3xx_freqs_table, - target_freq, relation, &idx)) - return -EINVAL; - - next = &pxa3xx_freqs[idx]; - - freqs.old = policy->cur; - freqs.new = next->cpufreq_mhz * 1000; - - pr_debug("CPU frequency from %d MHz to %d MHz%s\n", - freqs.old / 1000, freqs.new / 1000, - (freqs.old == freqs.new) ? 
" (skipped)" : ""); - - if (freqs.old == target_freq) - return 0; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + next = &pxa3xx_freqs[index]; local_irq_save(flags); __update_core_freq(next); __update_bus_freq(next); local_irq_restore(flags); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - return 0; } @@ -206,11 +178,10 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy) int ret = -EINVAL; /* set default policy and cpuinfo */ - policy->cpuinfo.min_freq = 104000; - policy->cpuinfo.max_freq = (cpu_is_pxa320()) ? 806000 : 624000; + policy->min = policy->cpuinfo.min_freq = 104000; + policy->max = policy->cpuinfo.max_freq = + (cpu_is_pxa320()) ? 806000 : 624000; policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */ - policy->max = pxa3xx_get_clk_frequency_khz(0); - policy->cur = policy->min = policy->max; if (cpu_is_pxa300() || cpu_is_pxa310()) ret = setup_freqs_table(policy, pxa300_freqs, @@ -230,8 +201,9 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy) } static struct cpufreq_driver pxa3xx_cpufreq_driver = { - .verify = pxa3xx_cpufreq_verify, - .target = pxa3xx_cpufreq_set, + .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = pxa3xx_cpufreq_set, .init = pxa3xx_cpufreq_init, .get = pxa3xx_cpufreq_get, .name = "pxa3xx-cpufreq", diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c index 22dcb81ef9d..2fd53eaaec2 100644 --- a/drivers/cpufreq/s3c2416-cpufreq.c +++ b/drivers/cpufreq/s3c2416-cpufreq.c @@ -72,31 +72,21 @@ static struct s3c2416_dvfs s3c2416_dvfs_table[] = { #endif static struct cpufreq_frequency_table s3c2416_freq_table[] = { - { SOURCE_HCLK, FREQ_DVS }, - { SOURCE_ARMDIV, 133333 }, - { SOURCE_ARMDIV, 266666 }, - { SOURCE_ARMDIV, 400000 }, - { 0, CPUFREQ_TABLE_END }, + { 0, SOURCE_HCLK, FREQ_DVS }, + { 0, SOURCE_ARMDIV, 133333 }, + { 0, SOURCE_ARMDIV, 266666 }, + { 0, SOURCE_ARMDIV, 400000 }, + { 0, 0, CPUFREQ_TABLE_END }, }; static struct cpufreq_frequency_table s3c2450_freq_table[] = { - { SOURCE_HCLK, FREQ_DVS }, - { SOURCE_ARMDIV, 133500 }, - { SOURCE_ARMDIV, 267000 }, - { SOURCE_ARMDIV, 534000 }, - { 0, CPUFREQ_TABLE_END }, + { 0, SOURCE_HCLK, FREQ_DVS }, + { 0, SOURCE_ARMDIV, 133500 }, + { 0, SOURCE_ARMDIV, 267000 }, + { 0, SOURCE_ARMDIV, 534000 }, + { 0, 0, CPUFREQ_TABLE_END }, }; -static int s3c2416_cpufreq_verify_speed(struct cpufreq_policy *policy) -{ - struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; - - if (policy->cpu != 0) - return -EINVAL; - - return cpufreq_frequency_table_verify(policy, s3c_freq->freq_table); -} - static unsigned int s3c2416_cpufreq_get_speed(unsigned int cpu) { struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; @@ -227,24 +217,15 @@ static int s3c2416_cpufreq_leave_dvs(struct s3c2416_data *s3c_freq, int idx) } static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) + unsigned int index) { struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; - struct cpufreq_freqs freqs; + unsigned int new_freq; int idx, ret, to_dvs = 0; - unsigned int i; mutex_lock(&cpufreq_lock); - pr_debug("cpufreq: to %dKHz, relation %d\n", target_freq, relation); - - ret = cpufreq_frequency_table_target(policy, s3c_freq->freq_table, - target_freq, relation, &i); - if (ret != 0) - goto out; - - idx = s3c_freq->freq_table[i].driver_data; + idx = s3c_freq->freq_table[index].driver_data; if (idx == SOURCE_HCLK) to_dvs = 1; @@ -256,24 +237,13 @@ static 
int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy, goto out; } - freqs.flags = 0; - freqs.old = s3c_freq->is_dvs ? FREQ_DVS - : clk_get_rate(s3c_freq->armclk) / 1000; - /* When leavin dvs mode, always switch the armdiv to the hclk rate * The S3C2416 has stability issues when switching directly to * higher frequencies. */ - freqs.new = (s3c_freq->is_dvs && !to_dvs) + new_freq = (s3c_freq->is_dvs && !to_dvs) ? clk_get_rate(s3c_freq->hclk) / 1000 - : s3c_freq->freq_table[i].frequency; - - pr_debug("cpufreq: Transition %d-%dkHz\n", freqs.old, freqs.new); - - if (!to_dvs && freqs.old == freqs.new) - goto out; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + : s3c_freq->freq_table[index].frequency; if (to_dvs) { pr_debug("cpufreq: enter dvs\n"); @@ -282,12 +252,10 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy, pr_debug("cpufreq: leave dvs\n"); ret = s3c2416_cpufreq_leave_dvs(s3c_freq, idx); } else { - pr_debug("cpufreq: change armdiv to %dkHz\n", freqs.new); - ret = s3c2416_cpufreq_set_armdiv(s3c_freq, freqs.new); + pr_debug("cpufreq: change armdiv to %dkHz\n", new_freq); + ret = s3c2416_cpufreq_set_armdiv(s3c_freq, new_freq); } - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - out: mutex_unlock(&cpufreq_lock); @@ -298,7 +266,7 @@ out: static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq) { int count, v, i, found; - struct cpufreq_frequency_table *freq; + struct cpufreq_frequency_table *pos; struct s3c2416_dvfs *dvfs; count = regulator_count_voltages(s3c_freq->vddarm); @@ -307,12 +275,11 @@ static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq) return; } - freq = s3c_freq->freq_table; - while (count > 0 && freq->frequency != CPUFREQ_TABLE_END) { - if (freq->frequency == CPUFREQ_ENTRY_INVALID) - continue; + if (!count) + goto out; - dvfs = &s3c2416_dvfs_table[freq->driver_data]; + cpufreq_for_each_valid_entry(pos, s3c_freq->freq_table) { + dvfs = &s3c2416_dvfs_table[pos->driver_data]; found = 0; /* Check only the min-voltage, more is always ok on S3C2416 */ @@ -324,13 +291,12 @@ static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq) if (!found) { pr_debug("cpufreq: %dkHz unsupported by regulator\n", - freq->frequency); - freq->frequency = CPUFREQ_ENTRY_INVALID; + pos->frequency); + pos->frequency = CPUFREQ_ENTRY_INVALID; } - - freq++; } +out: /* Guessed */ s3c_freq->regulator_latency = 1 * 1000 * 1000; } @@ -370,7 +336,7 @@ static struct notifier_block s3c2416_cpufreq_reboot_notifier = { static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy) { struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; - struct cpufreq_frequency_table *freq; + struct cpufreq_frequency_table *pos; struct clk *msysclk; unsigned long rate; int ret; @@ -459,47 +425,37 @@ static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy) s3c_freq->regulator_latency = 0; #endif - freq = s3c_freq->freq_table; - while (freq->frequency != CPUFREQ_TABLE_END) { + cpufreq_for_each_entry(pos, s3c_freq->freq_table) { /* special handling for dvs mode */ - if (freq->driver_data == 0) { + if (pos->driver_data == 0) { if (!s3c_freq->hclk) { pr_debug("cpufreq: %dkHz unsupported as it would need unavailable dvs mode\n", - freq->frequency); - freq->frequency = CPUFREQ_ENTRY_INVALID; + pos->frequency); + pos->frequency = CPUFREQ_ENTRY_INVALID; } else { - freq++; continue; } } /* Check for frequencies we can generate */ rate = clk_round_rate(s3c_freq->armdiv, - 
freq->frequency * 1000); + pos->frequency * 1000); rate /= 1000; - if (rate != freq->frequency) { + if (rate != pos->frequency) { pr_debug("cpufreq: %dkHz unsupported by clock (clk_round_rate return %lu)\n", - freq->frequency, rate); - freq->frequency = CPUFREQ_ENTRY_INVALID; + pos->frequency, rate); + pos->frequency = CPUFREQ_ENTRY_INVALID; } - - freq++; } - policy->cur = clk_get_rate(s3c_freq->armclk) / 1000; - /* Datasheet says PLL stabalisation time must be at least 300us, * so but add some fudge. (reference in LOCKCON0 register description) */ - policy->cpuinfo.transition_latency = (500 * 1000) + - s3c_freq->regulator_latency; - - ret = cpufreq_frequency_table_cpuinfo(policy, s3c_freq->freq_table); + ret = cpufreq_generic_init(policy, s3c_freq->freq_table, + (500 * 1000) + s3c_freq->regulator_latency); if (ret) goto err_freq_table; - cpufreq_frequency_table_get_attr(s3c_freq->freq_table, 0); - register_reboot_notifier(&s3c2416_cpufreq_reboot_notifier); return 0; @@ -518,19 +474,14 @@ err_hclk: return ret; } -static struct freq_attr *s3c2416_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver s3c2416_cpufreq_driver = { - .flags = 0, - .verify = s3c2416_cpufreq_verify_speed, - .target = s3c2416_cpufreq_set_target, + .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = s3c2416_cpufreq_set_target, .get = s3c2416_cpufreq_get_speed, .init = s3c2416_cpufreq_driver_init, .name = "s3c2416", - .attr = s3c2416_cpufreq_attr, + .attr = cpufreq_generic_attr, }; static int __init s3c2416_cpufreq_init(void) diff --git a/drivers/cpufreq/s3c2440-cpufreq.c b/drivers/cpufreq/s3c2440-cpufreq.c index 72b2cc8a5a8..f84ed10755b 100644 --- a/drivers/cpufreq/s3c2440-cpufreq.c +++ b/drivers/cpufreq/s3c2440-cpufreq.c @@ -22,8 +22,6 @@ #include <linux/err.h> #include <linux/io.h> -#include <mach/hardware.h> - #include <asm/mach/arch.h> #include <asm/mach/map.h> @@ -55,7 +53,7 @@ static inline int within_khz(unsigned long a, unsigned long b) * specified in @cfg. The values are stored in @cfg for later use * by the relevant set routine if the request settings can be reached. 
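The s3c2416 hunks above (and the s3c64xx ones further down) swap the open-coded "while (freq->frequency != CPUFREQ_TABLE_END) ... freq++" walks for the new iterator macros; notably, the removed regulator loop could spin forever, since its "continue" on an invalid entry bypassed the freq++. A sketch of the two macros over any CPUFREQ_TABLE_END-terminated table (table and limit_khz are stand-ins):

struct cpufreq_frequency_table *pos;

/* visits every row, including ones marked CPUFREQ_ENTRY_INVALID */
cpufreq_for_each_entry(pos, table)
	pr_debug("row: %u kHz\n", pos->frequency);

/* skips rows already marked CPUFREQ_ENTRY_INVALID */
cpufreq_for_each_valid_entry(pos, table)
	if (pos->frequency > limit_khz)
		pos->frequency = CPUFREQ_ENTRY_INVALID;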
*/ -int s3c2440_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg) +static int s3c2440_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg) { unsigned int hdiv, pdiv; unsigned long hclk, fclk, armclk; @@ -242,7 +240,7 @@ static int s3c2440_cpufreq_calctable(struct s3c_cpufreq_config *cfg, return ret; } -struct s3c_cpufreq_info s3c2440_cpufreq_info = { +static struct s3c_cpufreq_info s3c2440_cpufreq_info = { .max = { .fclk = 400000000, .hclk = 133333333, diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c index b0f343fcb7e..227ebf7c1ee 100644 --- a/drivers/cpufreq/s3c24xx-cpufreq.c +++ b/drivers/cpufreq/s3c24xx-cpufreq.c @@ -141,6 +141,7 @@ static int s3c_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg) static void s3c_cpufreq_setfvco(struct s3c_cpufreq_config *cfg) { + cfg->mpll = _clk_mpll; (cfg->info->set_fvco)(cfg); } @@ -217,7 +218,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy, s3c_cpufreq_updateclk(clk_pclk, cpu_new.freq.pclk); /* start the frequency change */ - cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_PRECHANGE); + cpufreq_freq_transition_begin(policy, &freqs.freqs); /* If hclk is staying the same, then we do not need to * re-write the IO or the refresh timings whilst we are changing @@ -261,7 +262,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy, local_irq_restore(flags); /* notify everyone we've done this */ - cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_POSTCHANGE); + cpufreq_freq_transition_end(policy, &freqs.freqs, 0); s3c_freq_dbg("%s: finished\n", __func__); return 0; @@ -355,11 +356,6 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy, return -EINVAL; } -static unsigned int s3c_cpufreq_get(unsigned int cpu) -{ - return clk_get_rate(clk_arm) / 1000; -} - struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name) { struct clk *clk; @@ -373,23 +369,8 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name) static int s3c_cpufreq_init(struct cpufreq_policy *policy) { - printk(KERN_INFO "%s: initialising policy %p\n", __func__, policy); - - if (policy->cpu != 0) - return -EINVAL; - - policy->cur = s3c_cpufreq_get(0); - policy->min = policy->cpuinfo.min_freq = 0; - policy->max = policy->cpuinfo.max_freq = cpu_cur.info->max.fclk / 1000; - policy->governor = CPUFREQ_DEFAULT_GOVERNOR; - - /* feed the latency information from the cpu driver */ - policy->cpuinfo.transition_latency = cpu_cur.info->latency; - - if (ftab) - cpufreq_frequency_table_cpuinfo(policy, ftab); - - return 0; + policy->clk = clk_arm; + return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency); } static int __init s3c_cpufreq_initclks(void) @@ -416,14 +397,6 @@ static int __init s3c_cpufreq_initclks(void) return 0; } -static int s3c_cpufreq_verify(struct cpufreq_policy *policy) -{ - if (policy->cpu != 0) - return -EINVAL; - - return 0; -} - #ifdef CONFIG_PM static struct cpufreq_frequency_table suspend_pll; static unsigned int suspend_freq; @@ -432,7 +405,7 @@ static int s3c_cpufreq_suspend(struct cpufreq_policy *policy) { suspend_pll.frequency = clk_get_rate(_clk_mpll); suspend_pll.driver_data = __raw_readl(S3C2410_MPLLCON); - suspend_freq = s3c_cpufreq_get(0) * 1000; + suspend_freq = clk_get_rate(clk_arm); return 0; } @@ -472,10 +445,9 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy) #endif static struct cpufreq_driver s3c24xx_driver = { - .flags = CPUFREQ_STICKY, - .verify = s3c_cpufreq_verify, + .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, .target 
= s3c_cpufreq_target, - .get = s3c_cpufreq_get, + .get = cpufreq_generic_get, .init = s3c_cpufreq_init, .suspend = s3c_cpufreq_suspend, .resume = s3c_cpufreq_resume, @@ -534,7 +506,7 @@ int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board) return 0; } -int __init s3c_cpufreq_auto_io(void) +static int __init s3c_cpufreq_auto_io(void) { int ret; @@ -615,7 +587,7 @@ static int s3c_cpufreq_build_freq(void) size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0); size++; - ftab = kmalloc(sizeof(*ftab) * size, GFP_KERNEL); + ftab = kzalloc(sizeof(*ftab) * size, GFP_KERNEL); if (!ftab) { printk(KERN_ERR "%s: no memory for tables\n", __func__); return -ENOMEM; @@ -693,7 +665,7 @@ int __init s3c_plltab_register(struct cpufreq_frequency_table *plls, size = sizeof(*vals) * (plls_no + 1); - vals = kmalloc(size, GFP_KERNEL); + vals = kzalloc(size, GFP_KERNEL); if (vals) { memcpy(vals, plls, size); pll_reg = vals; diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c index 8a72b0c555f..176e84cc399 100644 --- a/drivers/cpufreq/s3c64xx-cpufreq.c +++ b/drivers/cpufreq/s3c64xx-cpufreq.c @@ -19,7 +19,6 @@ #include <linux/regulator/consumer.h> #include <linux/module.h> -static struct clk *armclk; static struct regulator *vddarm; static unsigned long regulator_latency; @@ -38,115 +37,73 @@ static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = { }; static struct cpufreq_frequency_table s3c64xx_freq_table[] = { - { 0, 66000 }, - { 0, 100000 }, - { 0, 133000 }, - { 1, 200000 }, - { 1, 222000 }, - { 1, 266000 }, - { 2, 333000 }, - { 2, 400000 }, - { 2, 532000 }, - { 2, 533000 }, - { 3, 667000 }, - { 4, 800000 }, - { 0, CPUFREQ_TABLE_END }, + { 0, 0, 66000 }, + { 0, 0, 100000 }, + { 0, 0, 133000 }, + { 0, 1, 200000 }, + { 0, 1, 222000 }, + { 0, 1, 266000 }, + { 0, 2, 333000 }, + { 0, 2, 400000 }, + { 0, 2, 532000 }, + { 0, 2, 533000 }, + { 0, 3, 667000 }, + { 0, 4, 800000 }, + { 0, 0, CPUFREQ_TABLE_END }, }; #endif -static int s3c64xx_cpufreq_verify_speed(struct cpufreq_policy *policy) -{ - if (policy->cpu != 0) - return -EINVAL; - - return cpufreq_frequency_table_verify(policy, s3c64xx_freq_table); -} - -static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu) -{ - if (cpu != 0) - return 0; - - return clk_get_rate(armclk) / 1000; -} - static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) + unsigned int index) { - int ret; - unsigned int i; - struct cpufreq_freqs freqs; struct s3c64xx_dvfs *dvfs; + unsigned int old_freq, new_freq; + int ret; - ret = cpufreq_frequency_table_target(policy, s3c64xx_freq_table, - target_freq, relation, &i); - if (ret != 0) - return ret; - - freqs.old = clk_get_rate(armclk) / 1000; - freqs.new = s3c64xx_freq_table[i].frequency; - freqs.flags = 0; - dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[i].driver_data]; - - if (freqs.old == freqs.new) - return 0; - - pr_debug("Transition %d-%dkHz\n", freqs.old, freqs.new); - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + old_freq = clk_get_rate(policy->clk) / 1000; + new_freq = s3c64xx_freq_table[index].frequency; + dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[index].driver_data]; #ifdef CONFIG_REGULATOR - if (vddarm && freqs.new > freqs.old) { + if (vddarm && new_freq > old_freq) { ret = regulator_set_voltage(vddarm, dvfs->vddarm_min, dvfs->vddarm_max); if (ret != 0) { pr_err("Failed to set VDDARM for %dkHz: %d\n", - freqs.new, ret); - freqs.new = freqs.old; - goto post_notify; + new_freq, ret); + return ret; } } 
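Several conversions here hang the CPU clock on the policy itself (s3c24xx above sets policy->clk = clk_arm; s3c64xx and s5pv210 below do the same in their init paths): cpufreq_generic_get() can then report clk_get_rate(policy->clk) / 1000, and cpufreq_generic_init() folds table validation and transition-latency setup into one call. A minimal converted init, assuming a driver table tbl and a clock named "armclk":

#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/err.h>

static int my_clk_init(struct cpufreq_policy *policy)
{
	policy->clk = clk_get(NULL, "armclk");	/* clock name assumed */
	if (IS_ERR(policy->clk))
		return PTR_ERR(policy->clk);

	/* validates tbl, fills cpuinfo limits, sets latency in ns */
	return cpufreq_generic_init(policy, tbl, 500 * 1000);
}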
#endif - ret = clk_set_rate(armclk, freqs.new * 1000); + ret = clk_set_rate(policy->clk, new_freq * 1000); if (ret < 0) { pr_err("Failed to set rate %dkHz: %d\n", - freqs.new, ret); - freqs.new = freqs.old; + new_freq, ret); + return ret; } -post_notify: - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - if (ret) - goto err; - #ifdef CONFIG_REGULATOR - if (vddarm && freqs.new < freqs.old) { + if (vddarm && new_freq < old_freq) { ret = regulator_set_voltage(vddarm, dvfs->vddarm_min, dvfs->vddarm_max); if (ret != 0) { pr_err("Failed to set VDDARM for %dkHz: %d\n", - freqs.new, ret); - goto err_clk; + new_freq, ret); + if (clk_set_rate(policy->clk, old_freq * 1000) < 0) + pr_err("Failed to restore original clock rate\n"); + + return ret; } } #endif pr_debug("Set actual frequency %lukHz\n", - clk_get_rate(armclk) / 1000); + clk_get_rate(policy->clk) / 1000); return 0; - -err_clk: - if (clk_set_rate(armclk, freqs.old * 1000) < 0) - pr_err("Failed to restore original clock rate\n"); -err: - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - return ret; } #ifdef CONFIG_REGULATOR @@ -161,12 +118,11 @@ static void __init s3c64xx_cpufreq_config_regulator(void) pr_err("Unable to check supported voltages\n"); } - freq = s3c64xx_freq_table; - while (count > 0 && freq->frequency != CPUFREQ_TABLE_END) { - if (freq->frequency == CPUFREQ_ENTRY_INVALID) - continue; + if (!count) + goto out; - dvfs = &s3c64xx_dvfs_table[freq->index]; + cpufreq_for_each_valid_entry(freq, s3c64xx_freq_table) { + dvfs = &s3c64xx_dvfs_table[freq->driver_data]; found = 0; for (i = 0; i < count; i++) { @@ -180,10 +136,9 @@ static void __init s3c64xx_cpufreq_config_regulator(void) freq->frequency); freq->frequency = CPUFREQ_ENTRY_INVALID; } - - freq++; } +out: /* Guess based on having to do an I2C/SPI write; in future we * will be able to query the regulator performance here. */ regulator_latency = 1 * 1000 * 1000; @@ -203,11 +158,11 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy) return -ENODEV; } - armclk = clk_get(NULL, "armclk"); - if (IS_ERR(armclk)) { + policy->clk = clk_get(NULL, "armclk"); + if (IS_ERR(policy->clk)) { pr_err("Unable to obtain ARMCLK: %ld\n", - PTR_ERR(armclk)); - return PTR_ERR(armclk); + PTR_ERR(policy->clk)); + return PTR_ERR(policy->clk); } #ifdef CONFIG_REGULATOR @@ -222,12 +177,11 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy) } #endif - freq = s3c64xx_freq_table; - while (freq->frequency != CPUFREQ_TABLE_END) { + cpufreq_for_each_entry(freq, s3c64xx_freq_table) { unsigned long r; /* Check for frequencies we can generate */ - r = clk_round_rate(armclk, freq->frequency * 1000); + r = clk_round_rate(policy->clk, freq->frequency * 1000); r /= 1000; if (r != freq->frequency) { pr_debug("%dkHz unsupported by clock\n", @@ -237,36 +191,31 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy) /* If we have no regulator then assume startup * frequency is the maximum we can support. */ - if (!vddarm && freq->frequency > s3c64xx_cpufreq_get_speed(0)) + if (!vddarm && freq->frequency > clk_get_rate(policy->clk) / 1000) freq->frequency = CPUFREQ_ENTRY_INVALID; - - freq++; } - policy->cur = clk_get_rate(armclk) / 1000; - /* Datasheet says PLL stabalisation time (if we were to use * the PLLs, which we don't currently) is ~300us worst case, * but add some fudge. 
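The reworked s3c64xx set_target above keeps the classic DVFS ordering but expresses it with straight-line error returns instead of goto-driven notifier unwinding: raise the voltage before increasing the clock, lower it only after decreasing the clock, and roll the clock back if the post-decrease voltage step fails. Condensed sketch (vdd, min_uV, max_uV, old_freq and new_freq are stand-ins):

int ret;

if (new_freq > old_freq) {
	ret = regulator_set_voltage(vdd, min_uV, max_uV);
	if (ret)
		return ret;		/* nothing has changed yet */
}

ret = clk_set_rate(policy->clk, new_freq * 1000);
if (ret)
	return ret;			/* staying at high voltage is safe */

if (new_freq < old_freq) {
	ret = regulator_set_voltage(vdd, min_uV, max_uV);
	if (ret && clk_set_rate(policy->clk, old_freq * 1000) < 0)
		pr_err("failed to restore original clock rate\n");
}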
*/ - policy->cpuinfo.transition_latency = (500 * 1000) + regulator_latency; - - ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table); + ret = cpufreq_generic_init(policy, s3c64xx_freq_table, + (500 * 1000) + regulator_latency); if (ret != 0) { pr_err("Failed to configure frequency table: %d\n", ret); regulator_put(vddarm); - clk_put(armclk); + clk_put(policy->clk); } return ret; } static struct cpufreq_driver s3c64xx_cpufreq_driver = { - .flags = 0, - .verify = s3c64xx_cpufreq_verify_speed, - .target = s3c64xx_cpufreq_set_target, - .get = s3c64xx_cpufreq_get_speed, + .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = s3c64xx_cpufreq_set_target, + .get = cpufreq_generic_get, .init = s3c64xx_cpufreq_driver_init, .name = "s3c", }; diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c index 5c775707379..19a10b89fef 100644 --- a/drivers/cpufreq/s5pv210-cpufreq.c +++ b/drivers/cpufreq/s5pv210-cpufreq.c @@ -18,15 +18,12 @@ #include <linux/cpufreq.h> #include <linux/reboot.h> #include <linux/regulator/consumer.h> -#include <linux/suspend.h> #include <mach/map.h> #include <mach/regs-clock.h> -static struct clk *cpu_clk; static struct clk *dmc0_clk; static struct clk *dmc1_clk; -static struct cpufreq_freqs freqs; static DEFINE_MUTEX(set_freq_lock); /* APLL M,P,S values for 1G/800Mhz */ @@ -36,16 +33,7 @@ static DEFINE_MUTEX(set_freq_lock); /* Use 800MHz when entering sleep mode */ #define SLEEP_FREQ (800 * 1000) -/* - * relation has an additional symantics other than the standard of cpufreq - * DISALBE_FURTHER_CPUFREQ: disable further access to target - * ENABLE_FURTUER_CPUFREQ: enable access to target - */ -enum cpufreq_access { - DISABLE_FURTHER_CPUFREQ = 0x10, - ENABLE_FURTHER_CPUFREQ = 0x20, -}; - +/* Tracks if cpu freqency can be updated anymore */ static bool no_cpufreq_access; /* @@ -76,12 +64,12 @@ enum s5pv210_dmc_port { }; static struct cpufreq_frequency_table s5pv210_freq_table[] = { - {L0, 1000*1000}, - {L1, 800*1000}, - {L2, 400*1000}, - {L3, 200*1000}, - {L4, 100*1000}, - {0, CPUFREQ_TABLE_END}, + {0, L0, 1000*1000}, + {0, L1, 800*1000}, + {0, L2, 400*1000}, + {0, L3, 200*1000}, + {0, L4, 100*1000}, + {0, 0, CPUFREQ_TABLE_END}, }; static struct regulator *arm_regulator; @@ -174,68 +162,32 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq) __raw_writel(tmp1, reg); } -static int s5pv210_verify_speed(struct cpufreq_policy *policy) -{ - if (policy->cpu) - return -EINVAL; - - return cpufreq_frequency_table_verify(policy, s5pv210_freq_table); -} - -static unsigned int s5pv210_getspeed(unsigned int cpu) -{ - if (cpu) - return 0; - - return clk_get_rate(cpu_clk) / 1000; -} - -static int s5pv210_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) +static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index) { unsigned long reg; - unsigned int index, priv_index; + unsigned int priv_index; unsigned int pll_changing = 0; unsigned int bus_speed_changing = 0; + unsigned int old_freq, new_freq; int arm_volt, int_volt; int ret = 0; mutex_lock(&set_freq_lock); - if (relation & ENABLE_FURTHER_CPUFREQ) - no_cpufreq_access = false; - if (no_cpufreq_access) { -#ifdef CONFIG_PM_VERBOSE - pr_err("%s:%d denied access to %s as it is disabled" - "temporarily\n", __FILE__, __LINE__, __func__); -#endif - ret = -EINVAL; - goto exit; - } - - if (relation & DISABLE_FURTHER_CPUFREQ) - no_cpufreq_access = true; - - relation &= 
~(ENABLE_FURTHER_CPUFREQ | DISABLE_FURTHER_CPUFREQ); - - freqs.old = s5pv210_getspeed(0); - - if (cpufreq_frequency_table_target(policy, s5pv210_freq_table, - target_freq, relation, &index)) { + pr_err("Denied access to %s as it is disabled temporarily\n", + __func__); ret = -EINVAL; goto exit; } - freqs.new = s5pv210_freq_table[index].frequency; - - if (freqs.new == freqs.old) - goto exit; + old_freq = policy->cur; + new_freq = s5pv210_freq_table[index].frequency; /* Finding current running level index */ if (cpufreq_frequency_table_target(policy, s5pv210_freq_table, - freqs.old, relation, &priv_index)) { + old_freq, CPUFREQ_RELATION_H, + &priv_index)) { ret = -EINVAL; goto exit; } @@ -243,7 +195,7 @@ static int s5pv210_target(struct cpufreq_policy *policy, arm_volt = dvs_conf[index].arm_volt; int_volt = dvs_conf[index].int_volt; - if (freqs.new > freqs.old) { + if (new_freq > old_freq) { ret = regulator_set_voltage(arm_regulator, arm_volt, arm_volt_max); if (ret) @@ -255,8 +207,6 @@ static int s5pv210_target(struct cpufreq_policy *policy, goto exit; } - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - /* Check if there need to change PLL */ if ((index == L0) || (priv_index == L0)) pll_changing = 1; @@ -467,9 +417,7 @@ static int s5pv210_target(struct cpufreq_policy *policy, } } - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - if (freqs.new < freqs.old) { + if (new_freq < old_freq) { regulator_set_voltage(int_regulator, int_volt, int_volt_max); @@ -484,18 +432,6 @@ exit: return ret; } -#ifdef CONFIG_PM -static int s5pv210_cpufreq_suspend(struct cpufreq_policy *policy) -{ - return 0; -} - -static int s5pv210_cpufreq_resume(struct cpufreq_policy *policy) -{ - return 0; -} -#endif - static int check_mem_type(void __iomem *dmc_reg) { unsigned long val; @@ -511,9 +447,9 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy) unsigned long mem_type; int ret; - cpu_clk = clk_get(NULL, "armclk"); - if (IS_ERR(cpu_clk)) - return PTR_ERR(cpu_clk); + policy->clk = clk_get(NULL, "armclk"); + if (IS_ERR(policy->clk)) + return PTR_ERR(policy->clk); dmc0_clk = clk_get(NULL, "sclk_dmc0"); if (IS_ERR(dmc0_clk)) { @@ -551,75 +487,42 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy) s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000); s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk); - policy->cur = policy->min = policy->max = s5pv210_getspeed(0); - - cpufreq_frequency_table_get_attr(s5pv210_freq_table, policy->cpu); - - policy->cpuinfo.transition_latency = 40000; - - return cpufreq_frequency_table_cpuinfo(policy, s5pv210_freq_table); + policy->suspend_freq = SLEEP_FREQ; + return cpufreq_generic_init(policy, s5pv210_freq_table, 40000); out_dmc1: clk_put(dmc0_clk); out_dmc0: - clk_put(cpu_clk); + clk_put(policy->clk); return ret; } -static int s5pv210_cpufreq_notifier_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - int ret; - - switch (event) { - case PM_SUSPEND_PREPARE: - ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, - DISABLE_FURTHER_CPUFREQ); - if (ret < 0) - return NOTIFY_BAD; - - return NOTIFY_OK; - case PM_POST_RESTORE: - case PM_POST_SUSPEND: - cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, - ENABLE_FURTHER_CPUFREQ); - - return NOTIFY_OK; - } - - return NOTIFY_DONE; -} - static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this, unsigned long event, void *ptr) { int ret; - ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, - 
DISABLE_FURTHER_CPUFREQ); + ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0); if (ret < 0) return NOTIFY_BAD; + no_cpufreq_access = true; return NOTIFY_DONE; } static struct cpufreq_driver s5pv210_driver = { - .flags = CPUFREQ_STICKY, - .verify = s5pv210_verify_speed, - .target = s5pv210_target, - .get = s5pv210_getspeed, + .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = s5pv210_target, + .get = cpufreq_generic_get, .init = s5pv210_cpu_init, .name = "s5pv210", #ifdef CONFIG_PM - .suspend = s5pv210_cpufreq_suspend, - .resume = s5pv210_cpufreq_resume, + .suspend = cpufreq_generic_suspend, + .resume = cpufreq_generic_suspend, /* We need to set SLEEP FREQ again */ #endif }; -static struct notifier_block s5pv210_cpufreq_notifier = { - .notifier_call = s5pv210_cpufreq_notifier_event, -}; - static struct notifier_block s5pv210_cpufreq_reboot_notifier = { .notifier_call = s5pv210_cpufreq_reboot_notifier_event, }; @@ -639,7 +542,6 @@ static int __init s5pv210_cpufreq_init(void) return PTR_ERR(int_regulator); } - register_pm_notifier(&s5pv210_cpufreq_notifier); register_reboot_notifier(&s5pv210_cpufreq_reboot_notifier); return cpufreq_register_driver(&s5pv210_driver); diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c index cff18e87ca5..728eab77e8e 100644 --- a/drivers/cpufreq/sa1100-cpufreq.c +++ b/drivers/cpufreq/sa1100-cpufreq.c @@ -177,60 +177,33 @@ static void sa1100_update_dram_timings(int current_speed, int new_speed) } } -static int sa1100_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) +static int sa1100_target(struct cpufreq_policy *policy, unsigned int ppcr) { unsigned int cur = sa11x0_getspeed(0); - unsigned int new_ppcr; - struct cpufreq_freqs freqs; - - new_ppcr = sa11x0_freq_to_ppcr(target_freq); - switch (relation) { - case CPUFREQ_RELATION_L: - if (sa11x0_ppcr_to_freq(new_ppcr) > policy->max) - new_ppcr--; - break; - case CPUFREQ_RELATION_H: - if ((sa11x0_ppcr_to_freq(new_ppcr) > target_freq) && - (sa11x0_ppcr_to_freq(new_ppcr - 1) >= policy->min)) - new_ppcr--; - break; - } - - freqs.old = cur; - freqs.new = sa11x0_ppcr_to_freq(new_ppcr); + unsigned int new_freq; - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + new_freq = sa11x0_freq_table[ppcr].frequency; - if (freqs.new > cur) - sa1100_update_dram_timings(cur, freqs.new); + if (new_freq > cur) + sa1100_update_dram_timings(cur, new_freq); - PPCR = new_ppcr; + PPCR = ppcr; - if (freqs.new < cur) - sa1100_update_dram_timings(cur, freqs.new); - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + if (new_freq < cur) + sa1100_update_dram_timings(cur, new_freq); return 0; } static int __init sa1100_cpu_init(struct cpufreq_policy *policy) { - if (policy->cpu != 0) - return -EINVAL; - policy->cur = policy->min = policy->max = sa11x0_getspeed(0); - policy->cpuinfo.min_freq = 59000; - policy->cpuinfo.max_freq = 287000; - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; - return 0; + return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL); } static struct cpufreq_driver sa1100_driver __refdata = { - .flags = CPUFREQ_STICKY, - .verify = sa11x0_verify_speed, - .target = sa1100_target, + .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = sa1100_target, .get = sa11x0_getspeed, .init = sa1100_cpu_init, .name = "sa1100", diff --git 
a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c index 39c90b6f428..b5befc21117 100644 --- a/drivers/cpufreq/sa1110-cpufreq.c +++ b/drivers/cpufreq/sa1110-cpufreq.c @@ -229,36 +229,14 @@ sdram_update_refresh(u_int cpu_khz, struct sdram_params *sdram) /* * Ok, set the CPU frequency. */ -static int sa1110_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) +static int sa1110_target(struct cpufreq_policy *policy, unsigned int ppcr) { struct sdram_params *sdram = &sdram_params; - struct cpufreq_freqs freqs; struct sdram_info sd; unsigned long flags; - unsigned int ppcr, unused; - - switch (relation) { - case CPUFREQ_RELATION_L: - ppcr = sa11x0_freq_to_ppcr(target_freq); - if (sa11x0_ppcr_to_freq(ppcr) > policy->max) - ppcr--; - break; - case CPUFREQ_RELATION_H: - ppcr = sa11x0_freq_to_ppcr(target_freq); - if (ppcr && (sa11x0_ppcr_to_freq(ppcr) > target_freq) && - (sa11x0_ppcr_to_freq(ppcr-1) >= policy->min)) - ppcr--; - break; - default: - return -EINVAL; - } - - freqs.old = sa11x0_getspeed(0); - freqs.new = sa11x0_ppcr_to_freq(ppcr); + unsigned int unused; - sdram_calculate_timing(&sd, freqs.new, sdram); + sdram_calculate_timing(&sd, sa11x0_freq_table[ppcr].frequency, sdram); #if 0 /* @@ -277,8 +255,6 @@ static int sa1110_target(struct cpufreq_policy *policy, sd.mdcas[2] = 0xaaaaaaaa; #endif - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - /* * The clock could be going away for some time. Set the SDRAMs * to refresh rapidly (every 64 memory clock cycles). To get @@ -323,30 +299,22 @@ static int sa1110_target(struct cpufreq_policy *policy, /* * Now, return the SDRAM refresh back to normal. */ - sdram_update_refresh(freqs.new, sdram); - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + sdram_update_refresh(sa11x0_freq_table[ppcr].frequency, sdram); return 0; } static int __init sa1110_cpu_init(struct cpufreq_policy *policy) { - if (policy->cpu != 0) - return -EINVAL; - policy->cur = policy->min = policy->max = sa11x0_getspeed(0); - policy->cpuinfo.min_freq = 59000; - policy->cpuinfo.max_freq = 287000; - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; - return 0; + return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL); } /* sa1110_driver needs __refdata because it must remain after init registers * it with cpufreq_register_driver() */ static struct cpufreq_driver sa1110_driver __refdata = { - .flags = CPUFREQ_STICKY, - .verify = sa11x0_verify_speed, - .target = sa1110_target, + .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = sa1110_target, .get = sa11x0_getspeed, .init = sa1110_cpu_init, .name = "sa1110", @@ -381,7 +349,7 @@ static int __init sa1110_clk_init(void) name = "K4S641632D"; if (machine_is_h3100()) name = "KM416S4030CT"; - if (machine_is_jornada720()) + if (machine_is_jornada720() || machine_is_h3600()) name = "K4S281632B-1H"; if (machine_is_nanoengine()) name = "MT48LC8M16A2TG-75"; diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c index d6f6c6f4efa..ac84e481801 100644 --- a/drivers/cpufreq/sc520_freq.c +++ b/drivers/cpufreq/sc520_freq.c @@ -33,9 +33,9 @@ static __u8 __iomem *cpuctl; #define PFX "sc520_freq: " static struct cpufreq_frequency_table sc520_freq_table[] = { - {0x01, 100000}, - {0x02, 133000}, - {0, CPUFREQ_TABLE_END}, + {0, 0x01, 100000}, + {0, 0x02, 133000}, + {0, 0, CPUFREQ_TABLE_END}, }; static unsigned int sc520_freq_get_cpu_frequency(unsigned int 
cpu) @@ -53,21 +53,11 @@ static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu) } } -static void sc520_freq_set_cpu_state(struct cpufreq_policy *policy, - unsigned int state) +static int sc520_freq_target(struct cpufreq_policy *policy, unsigned int state) { - struct cpufreq_freqs freqs; u8 clockspeed_reg; - freqs.old = sc520_freq_get_cpu_frequency(0); - freqs.new = sc520_freq_table[state].frequency; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - - pr_debug("attempting to set frequency to %i kHz\n", - sc520_freq_table[state].frequency); - local_irq_disable(); clockspeed_reg = *cpuctl & ~0x03; @@ -75,30 +65,9 @@ static void sc520_freq_set_cpu_state(struct cpufreq_policy *policy, local_irq_enable(); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); -}; - -static int sc520_freq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, &sc520_freq_table[0]); -} - -static int sc520_freq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int newstate = 0; - - if (cpufreq_frequency_table_target(policy, sc520_freq_table, - target_freq, relation, &newstate)) - return -EINVAL; - - sc520_freq_set_cpu_state(policy, newstate); - return 0; } - /* * Module init and exit code */ @@ -106,7 +75,6 @@ static int sc520_freq_target(struct cpufreq_policy *policy, static int sc520_freq_cpu_init(struct cpufreq_policy *policy) { struct cpuinfo_x86 *c = &cpu_data(0); - int result; /* capability check */ if (c->x86_vendor != X86_VENDOR_AMD || @@ -115,39 +83,18 @@ static int sc520_freq_cpu_init(struct cpufreq_policy *policy) /* cpuinfo and default policy values */ policy->cpuinfo.transition_latency = 1000000; /* 1ms */ - policy->cur = sc520_freq_get_cpu_frequency(0); - - result = cpufreq_frequency_table_cpuinfo(policy, sc520_freq_table); - if (result) - return result; - - cpufreq_frequency_table_get_attr(sc520_freq_table, policy->cpu); - - return 0; -} - -static int sc520_freq_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; + return cpufreq_table_validate_and_show(policy, sc520_freq_table); } -static struct freq_attr *sc520_freq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - - static struct cpufreq_driver sc520_freq_driver = { .get = sc520_freq_get_cpu_frequency, - .verify = sc520_freq_verify, - .target = sc520_freq_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = sc520_freq_target, .init = sc520_freq_cpu_init, - .exit = sc520_freq_cpu_exit, .name = "sc520_freq", - .attr = sc520_freq_attr, + .attr = cpufreq_generic_attr, }; static const struct x86_cpu_id sc520_ids[] = { diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c index ffc6d24b0cf..86628e22b2a 100644 --- a/drivers/cpufreq/sh-cpufreq.c +++ b/drivers/cpufreq/sh-cpufreq.c @@ -68,10 +68,10 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy, freqs.new = (freq + 500) / 1000; freqs.flags = 0; - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + cpufreq_freq_transition_begin(policy, &freqs); set_cpus_allowed_ptr(current, &cpus_allowed); clk_set_rate(cpuclk, freq); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + cpufreq_freq_transition_end(policy, &freqs, 0); dev_dbg(dev, "set frequency %lu Hz\n", freq); @@ -87,15 +87,12 @@ static int sh_cpufreq_verify(struct cpufreq_policy *policy) if (freq_table) return cpufreq_frequency_table_verify(policy, freq_table); - 
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); + cpufreq_verify_within_cpu_limits(policy); policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000; policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); - + cpufreq_verify_within_cpu_limits(policy); return 0; } @@ -114,15 +111,13 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy) return PTR_ERR(cpuclk); } - policy->cur = sh_cpufreq_get(cpu); - freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL; if (freq_table) { int result; - result = cpufreq_frequency_table_cpuinfo(policy, freq_table); - if (!result) - cpufreq_frequency_table_get_attr(freq_table, cpu); + result = cpufreq_table_validate_and_show(policy, freq_table); + if (result) + return result; } else { dev_notice(dev, "no frequency table found, falling back " "to rate rounding.\n"); @@ -148,17 +143,11 @@ static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy) unsigned int cpu = policy->cpu; struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); - cpufreq_frequency_table_put_attr(cpu); clk_put(cpuclk); return 0; } -static struct freq_attr *sh_freq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver sh_cpufreq_driver = { .name = "sh", .get = sh_cpufreq_get, @@ -166,7 +155,7 @@ static struct cpufreq_driver sh_cpufreq_driver = { .verify = sh_cpufreq_verify, .init = sh_cpufreq_cpu_init, .exit = sh_cpufreq_cpu_exit, - .attr = sh_freq_attr, + .attr = cpufreq_generic_attr, }; static int __init sh_cpufreq_module_init(void) diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c index cf5bc2ca16f..b73feeb666f 100644 --- a/drivers/cpufreq/sparc-us2e-cpufreq.c +++ b/drivers/cpufreq/sparc-us2e-cpufreq.c @@ -245,14 +245,12 @@ static unsigned int us2e_freq_get(unsigned int cpu) return clock_tick / estar_to_divisor(estar); } -static void us2e_set_cpu_divider_index(struct cpufreq_policy *policy, - unsigned int index) +static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index) { unsigned int cpu = policy->cpu; unsigned long new_bits, new_freq; unsigned long clock_tick, divisor, old_divisor, estar; cpumask_t cpus_allowed; - struct cpufreq_freqs freqs; cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); set_cpus_allowed_ptr(current, cpumask_of(cpu)); @@ -266,41 +264,15 @@ static void us2e_set_cpu_divider_index(struct cpufreq_policy *policy, old_divisor = estar_to_divisor(estar); - freqs.old = clock_tick / old_divisor; - freqs.new = new_freq; - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - if (old_divisor != divisor) us2e_transition(estar, new_bits, clock_tick * 1000, old_divisor, divisor); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - set_cpus_allowed_ptr(current, &cpus_allowed); -} - -static int us2e_freq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int new_index = 0; - - if (cpufreq_frequency_table_target(policy, - &us2e_freq_table[policy->cpu].table[0], - target_freq, relation, &new_index)) - return -EINVAL; - - us2e_set_cpu_divider_index(policy, new_index); return 0; } -static int us2e_freq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, - &us2e_freq_table[policy->cpu].table[0]); -} - static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy) { unsigned int cpu = policy->cpu; @@ -324,13 +296,13 
@@ static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy) policy->cpuinfo.transition_latency = 0; policy->cur = clock_tick; - return cpufreq_frequency_table_cpuinfo(policy, table); + return cpufreq_table_validate_and_show(policy, table); } static int us2e_freq_cpu_exit(struct cpufreq_policy *policy) { if (cpufreq_us2e_driver) - us2e_set_cpu_divider_index(policy, 0); + us2e_freq_target(policy, 0); return 0; } @@ -361,8 +333,8 @@ static int __init us2e_freq_init(void) goto err_out; driver->init = us2e_freq_cpu_init; - driver->verify = us2e_freq_verify; - driver->target = us2e_freq_target; + driver->verify = cpufreq_generic_frequency_table_verify; + driver->target_index = us2e_freq_target; driver->get = us2e_freq_get; driver->exit = us2e_freq_cpu_exit; strcpy(driver->name, "UltraSPARC-IIe"); diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c index ac76b489979..9bb42ba50ef 100644 --- a/drivers/cpufreq/sparc-us3-cpufreq.c +++ b/drivers/cpufreq/sparc-us3-cpufreq.c @@ -93,13 +93,11 @@ static unsigned int us3_freq_get(unsigned int cpu) return ret; } -static void us3_set_cpu_divider_index(struct cpufreq_policy *policy, - unsigned int index) +static int us3_freq_target(struct cpufreq_policy *policy, unsigned int index) { unsigned int cpu = policy->cpu; unsigned long new_bits, new_freq, reg; cpumask_t cpus_allowed; - struct cpufreq_freqs freqs; cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); set_cpus_allowed_ptr(current, cpumask_of(cpu)); @@ -125,43 +123,15 @@ static void us3_set_cpu_divider_index(struct cpufreq_policy *policy, reg = read_safari_cfg(); - freqs.old = get_current_freq(cpu, reg); - freqs.new = new_freq; - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - reg &= ~SAFARI_CFG_DIV_MASK; reg |= new_bits; write_safari_cfg(reg); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - set_cpus_allowed_ptr(current, &cpus_allowed); -} - -static int us3_freq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int new_index = 0; - - if (cpufreq_frequency_table_target(policy, - &us3_freq_table[policy->cpu].table[0], - target_freq, - relation, - &new_index)) - return -EINVAL; - - us3_set_cpu_divider_index(policy, new_index); return 0; } -static int us3_freq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, - &us3_freq_table[policy->cpu].table[0]); -} - static int __init us3_freq_cpu_init(struct cpufreq_policy *policy) { unsigned int cpu = policy->cpu; @@ -181,13 +151,13 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy) policy->cpuinfo.transition_latency = 0; policy->cur = clock_tick; - return cpufreq_frequency_table_cpuinfo(policy, table); + return cpufreq_table_validate_and_show(policy, table); } static int us3_freq_cpu_exit(struct cpufreq_policy *policy) { if (cpufreq_us3_driver) - us3_set_cpu_divider_index(policy, 0); + us3_freq_target(policy, 0); return 0; } @@ -222,8 +192,8 @@ static int __init us3_freq_init(void) goto err_out; driver->init = us3_freq_cpu_init; - driver->verify = us3_freq_verify; - driver->target = us3_freq_target; + driver->verify = cpufreq_generic_frequency_table_verify; + driver->target_index = us3_freq_target; driver->get = us3_freq_get; driver->exit = us3_freq_cpu_exit; strcpy(driver->name, "UltraSPARC-III"); diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c index 19e364fa595..38678396636 100644 --- a/drivers/cpufreq/spear-cpufreq.c +++ 
b/drivers/cpufreq/spear-cpufreq.c @@ -19,6 +19,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/of_device.h> +#include <linux/platform_device.h> #include <linux/slab.h> #include <linux/types.h> @@ -30,16 +31,6 @@ static struct { u32 cnt; } spear_cpufreq; -static int spear_cpufreq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, spear_cpufreq.freq_tbl); -} - -static unsigned int spear_cpufreq_get(unsigned int cpu) -{ - return clk_get_rate(spear_cpufreq.clk) / 1000; -} - static struct clk *spear1340_cpu_get_possible_parent(unsigned long newfreq) { struct clk *sys_pclk; @@ -110,20 +101,14 @@ static int spear1340_set_cpu_rate(struct clk *sys_pclk, unsigned long newfreq) } static int spear_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) + unsigned int index) { - struct cpufreq_freqs freqs; - unsigned long newfreq; + long newfreq; struct clk *srcclk; - int index, ret, mult = 1; - - if (cpufreq_frequency_table_target(policy, spear_cpufreq.freq_tbl, - target_freq, relation, &index)) - return -EINVAL; - - freqs.old = spear_cpufreq_get(0); + int ret, mult = 1; newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000; + if (of_machine_is_compatible("st,spear1340")) { /* * SPEAr1340 is special in the sense that due to the possibility @@ -149,73 +134,40 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy, } newfreq = clk_round_rate(srcclk, newfreq * mult); - if (newfreq < 0) { + if (newfreq <= 0) { pr_err("clk_round_rate failed for cpu src clock\n"); return newfreq; } - freqs.new = newfreq / 1000; - freqs.new /= mult; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - if (mult == 2) ret = spear1340_set_cpu_rate(srcclk, newfreq); else ret = clk_set_rate(spear_cpufreq.clk, newfreq); - /* Get current rate after clk_set_rate, in case of failure */ - if (ret) { + if (ret) pr_err("CPU Freq: cpu clk_set_rate failed: %d\n", ret); - freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000; - } - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return ret; } static int spear_cpufreq_init(struct cpufreq_policy *policy) { - int ret; - - ret = cpufreq_frequency_table_cpuinfo(policy, spear_cpufreq.freq_tbl); - if (ret) { - pr_err("cpufreq_frequency_table_cpuinfo() failed"); - return ret; - } - - cpufreq_frequency_table_get_attr(spear_cpufreq.freq_tbl, policy->cpu); - policy->cpuinfo.transition_latency = spear_cpufreq.transition_latency; - policy->cur = spear_cpufreq_get(0); - - cpumask_setall(policy->cpus); - - return 0; -} - -static int spear_cpufreq_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; + policy->clk = spear_cpufreq.clk; + return cpufreq_generic_init(policy, spear_cpufreq.freq_tbl, + spear_cpufreq.transition_latency); } -static struct freq_attr *spear_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver spear_cpufreq_driver = { .name = "cpufreq-spear", - .flags = CPUFREQ_STICKY, - .verify = spear_cpufreq_verify, - .target = spear_cpufreq_target, - .get = spear_cpufreq_get, + .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = spear_cpufreq_target, + .get = cpufreq_generic_get, .init = spear_cpufreq_init, - .exit = spear_cpufreq_exit, - .attr = spear_cpufreq_attr, + .attr = cpufreq_generic_attr, }; -static int spear_cpufreq_driver_init(void) +static int 
spear_cpufreq_probe(struct platform_device *pdev) { struct device_node *np; const struct property *prop; @@ -243,18 +195,15 @@ static int spear_cpufreq_driver_init(void) cnt = prop->length / sizeof(u32); val = prop->value; - freq_tbl = kmalloc(sizeof(*freq_tbl) * (cnt + 1), GFP_KERNEL); + freq_tbl = kzalloc(sizeof(*freq_tbl) * (cnt + 1), GFP_KERNEL); if (!freq_tbl) { ret = -ENOMEM; goto out_put_node; } - for (i = 0; i < cnt; i++) { - freq_tbl[i].driver_data = i; + for (i = 0; i < cnt; i++) freq_tbl[i].frequency = be32_to_cpup(val++); - } - freq_tbl[i].driver_data = i; freq_tbl[i].frequency = CPUFREQ_TABLE_END; spear_cpufreq.freq_tbl = freq_tbl; @@ -283,7 +232,15 @@ out_put_node: of_node_put(np); return ret; } -late_initcall(spear_cpufreq_driver_init); + +static struct platform_driver spear_cpufreq_platdrv = { + .driver = { + .name = "spear-cpufreq", + .owner = THIS_MODULE, + }, + .probe = spear_cpufreq_probe, +}; +module_platform_driver(spear_cpufreq_platdrv); MODULE_AUTHOR("Deepak Sikri <deepak.sikri@st.com>"); MODULE_DESCRIPTION("SPEAr CPUFreq driver"); diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c index f897d510584..7d4a3157160 100644 --- a/drivers/cpufreq/speedstep-centrino.c +++ b/drivers/cpufreq/speedstep-centrino.c @@ -28,7 +28,7 @@ #include <asm/cpu_device_id.h> #define PFX "speedstep-centrino: " -#define MAINTAINER "cpufreq@vger.kernel.org" +#define MAINTAINER "linux-pm@vger.kernel.org" #define INTEL_MSR_RANGE (0xffff) @@ -343,9 +343,7 @@ static unsigned int get_cur_freq(unsigned int cpu) static int centrino_cpu_init(struct cpufreq_policy *policy) { struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu); - unsigned freq; unsigned l, h; - int ret; int i; /* Only Intel makes Enhanced Speedstep-capable CPUs */ @@ -373,9 +371,8 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) return -ENODEV; } - if (centrino_cpu_init_table(policy)) { + if (centrino_cpu_init_table(policy)) return -ENODEV; - } /* Check to see if Enhanced SpeedStep is enabled, and try to enable it if not. */ @@ -395,22 +392,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) } } - freq = get_cur_freq(policy->cpu); policy->cpuinfo.transition_latency = 10000; /* 10uS transition latency */ - policy->cur = freq; - - pr_debug("centrino_cpu_init: cur=%dkHz\n", policy->cur); - ret = cpufreq_frequency_table_cpuinfo(policy, + return cpufreq_table_validate_and_show(policy, per_cpu(centrino_model, policy->cpu)->op_points); - if (ret) - return (ret); - - cpufreq_frequency_table_get_attr( - per_cpu(centrino_model, policy->cpu)->op_points, policy->cpu); - - return 0; } static int centrino_cpu_exit(struct cpufreq_policy *policy) @@ -420,44 +406,24 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy) if (!per_cpu(centrino_model, cpu)) return -ENODEV; - cpufreq_frequency_table_put_attr(cpu); - per_cpu(centrino_model, cpu) = NULL; return 0; } /** - * centrino_verify - verifies a new CPUFreq policy - * @policy: new policy - * - * Limit must be within this model's frequency range at least one - * border included. 
- */ -static int centrino_verify (struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, - per_cpu(centrino_model, policy->cpu)->op_points); -} - -/** * centrino_setpolicy - set a new CPUFreq policy * @policy: new policy - * @target_freq: the target frequency - * @relation: how that frequency relates to achieved frequency - * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) + * @index: index of target frequency * * Sets a new CPUFreq policy. */ -static int centrino_target (struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) +static int centrino_target(struct cpufreq_policy *policy, unsigned int index) { - unsigned int newstate = 0; unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu; - struct cpufreq_freqs freqs; int retval = 0; - unsigned int j, first_cpu, tmp; + unsigned int j, first_cpu; + struct cpufreq_frequency_table *op_points; cpumask_var_t covered_cpus; if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))) @@ -468,16 +434,8 @@ static int centrino_target (struct cpufreq_policy *policy, goto out; } - if (unlikely(cpufreq_frequency_table_target(policy, - per_cpu(centrino_model, cpu)->op_points, - target_freq, - relation, - &newstate))) { - retval = -EINVAL; - goto out; - } - first_cpu = 1; + op_points = &per_cpu(centrino_model, cpu)->op_points[index]; for_each_cpu(j, policy->cpus) { int good_cpu; @@ -501,7 +459,7 @@ static int centrino_target (struct cpufreq_policy *policy, break; } - msr = per_cpu(centrino_model, cpu)->op_points[newstate].driver_data; + msr = op_points->driver_data; if (first_cpu) { rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h); @@ -512,15 +470,6 @@ static int centrino_target (struct cpufreq_policy *policy, goto out; } - freqs.old = extract_clock(oldmsr, cpu, 0); - freqs.new = extract_clock(msr, cpu, 0); - - pr_debug("target=%dkHz old=%d new=%d msr=%04x\n", - target_freq, freqs.old, freqs.new, msr); - - cpufreq_notify_transition(policy, &freqs, - CPUFREQ_PRECHANGE); - first_cpu = 0; /* all but 16 LSB are reserved, treat them with care */ oldmsr &= ~0xffff; @@ -535,8 +484,6 @@ static int centrino_target (struct cpufreq_policy *policy, cpumask_set_cpu(j, covered_cpus); } - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - if (unlikely(retval)) { /* * We have failed halfway through the frequency change. @@ -547,12 +494,6 @@ static int centrino_target (struct cpufreq_policy *policy, for_each_cpu(j, covered_cpus) wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr, h); - - tmp = freqs.new; - freqs.new = freqs.old; - freqs.old = tmp; - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); } retval = 0; @@ -561,20 +502,15 @@ out: return retval; } -static struct freq_attr* centrino_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver centrino_driver = { .name = "centrino", /* should be speedstep-centrino, but there's a 16 char limit */ .init = centrino_cpu_init, .exit = centrino_cpu_exit, - .verify = centrino_verify, - .target = centrino_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = centrino_target, .get = get_cur_freq, - .attr = centrino_attr, + .attr = cpufreq_generic_attr, }; /* diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c index 5355abb69af..1a07b5904ed 100644 --- a/drivers/cpufreq/speedstep-ich.c +++ b/drivers/cpufreq/speedstep-ich.c @@ -49,9 +49,9 @@ static u32 pmbase; * are in kHz for the time being. 
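Every frequency-table hunk in this series (including speedstep_freqs just below) grows a leading 0 because struct cpufreq_frequency_table gained a flags field ahead of driver_data and frequency, so positional initializers now need all three values. Designated initializers would sidestep this churn; a sketch of both spellings over hypothetical entries:

static struct cpufreq_frequency_table example_freqs[] = {
	{ 0, 0x01, 100000 },			/* flags, driver_data, kHz */
	{ .driver_data = 0x02, .frequency = 133000 },
	{ 0, 0, CPUFREQ_TABLE_END },
};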
  */
 static struct cpufreq_frequency_table speedstep_freqs[] = {
-	{SPEEDSTEP_HIGH,	0},
-	{SPEEDSTEP_LOW,		0},
-	{0,			CPUFREQ_TABLE_END},
+	{0, SPEEDSTEP_HIGH,	0},
+	{0, SPEEDSTEP_LOW,	0},
+	{0, 0,			CPUFREQ_TABLE_END},
 };
 
@@ -251,56 +251,23 @@ static unsigned int speedstep_get(unsigned int cpu)
 /**
  * speedstep_target - set a new CPUFreq policy
  * @policy: new policy
- * @target_freq: the target frequency
- * @relation: how that frequency relates to achieved frequency
- *	(CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
+ * @index: index of target frequency
  *
  * Sets a new CPUFreq policy.
  */
-static int speedstep_target(struct cpufreq_policy *policy,
-			     unsigned int target_freq,
-			     unsigned int relation)
+static int speedstep_target(struct cpufreq_policy *policy, unsigned int index)
 {
-	unsigned int newstate = 0, policy_cpu;
-	struct cpufreq_freqs freqs;
-
-	if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0],
-				target_freq, relation, &newstate))
-		return -EINVAL;
+	unsigned int policy_cpu;
 
 	policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);
-	freqs.old = speedstep_get(policy_cpu);
-	freqs.new = speedstep_freqs[newstate].frequency;
-
-	pr_debug("transiting from %u to %u kHz\n", freqs.old, freqs.new);
-
-	/* no transition necessary */
-	if (freqs.old == freqs.new)
-		return 0;
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
-	smp_call_function_single(policy_cpu, _speedstep_set_state, &newstate,
+	smp_call_function_single(policy_cpu, _speedstep_set_state, &index,
 				 true);
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
 	return 0;
 }
 
-/**
- * speedstep_verify - verifies a new CPUFreq policy
- * @policy: new policy
- *
- * Limit must be within speedstep_low_freq and speedstep_high_freq, with
- * at least one border included.
- */
-static int speedstep_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
-}
-
 struct get_freqs {
 	struct cpufreq_policy *policy;
 	int ret;
@@ -320,8 +287,7 @@ static void get_freqs_on_cpu(void *_get_freqs)
 
 static int speedstep_cpu_init(struct cpufreq_policy *policy)
 {
-	int result;
-	unsigned int policy_cpu, speed;
+	unsigned int policy_cpu;
 	struct get_freqs gf;
 
 	/* only run on CPU to be set, or on its sibling */
@@ -336,49 +302,17 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
 	if (gf.ret)
 		return gf.ret;
 
-	/* get current speed setting */
-	speed = speedstep_get(policy_cpu);
-	if (!speed)
-		return -EIO;
-
-	pr_debug("currently at %s speed setting - %i MHz\n",
-		(speed == speedstep_freqs[SPEEDSTEP_LOW].frequency)
-		? "low" : "high",
"low" : "high", - (speed / 1000)); - - /* cpuinfo and default policy values */ - policy->cur = speed; - - result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs); - if (result) - return result; - - cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu); - - return 0; + return cpufreq_table_validate_and_show(policy, speedstep_freqs); } -static int speedstep_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; -} - -static struct freq_attr *speedstep_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - - static struct cpufreq_driver speedstep_driver = { .name = "speedstep-ich", - .verify = speedstep_verify, - .target = speedstep_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = speedstep_target, .init = speedstep_cpu_init, - .exit = speedstep_cpu_exit, .get = speedstep_get, - .attr = speedstep_attr, + .attr = cpufreq_generic_attr, }; static const struct x86_cpu_id ss_smi_ids[] = { diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c index abfba4f731e..8635eec96da 100644 --- a/drivers/cpufreq/speedstep-smi.c +++ b/drivers/cpufreq/speedstep-smi.c @@ -42,9 +42,9 @@ static enum speedstep_processor speedstep_processor; * are in kHz for the time being. */ static struct cpufreq_frequency_table speedstep_freqs[] = { - {SPEEDSTEP_HIGH, 0}, - {SPEEDSTEP_LOW, 0}, - {0, CPUFREQ_TABLE_END}, + {0, SPEEDSTEP_HIGH, 0}, + {0, SPEEDSTEP_LOW, 0}, + {0, 0, CPUFREQ_TABLE_END}, }; #define GET_SPEEDSTEP_OWNER 0 @@ -141,38 +141,6 @@ static int speedstep_smi_get_freqs(unsigned int *low, unsigned int *high) } /** - * speedstep_get_state - set the SpeedStep state - * @state: processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH) - * - */ -static int speedstep_get_state(void) -{ - u32 function = GET_SPEEDSTEP_STATE; - u32 result, state, edi, command, dummy; - - command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); - - pr_debug("trying to determine current setting with command %x " - "at port %x\n", command, smi_port); - - __asm__ __volatile__( - "push %%ebp\n" - "out %%al, (%%dx)\n" - "pop %%ebp\n" - : "=a" (result), - "=b" (state), "=D" (edi), - "=c" (dummy), "=d" (dummy), "=S" (dummy) - : "a" (command), "b" (function), "c" (0), - "d" (smi_port), "S" (0), "D" (0) - ); - - pr_debug("state is %x, result is %x\n", state, result); - - return state & 1; -} - - -/** * speedstep_set_state - set the SpeedStep state * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH) * @@ -235,52 +203,21 @@ static void speedstep_set_state(unsigned int state) /** * speedstep_target - set a new CPUFreq policy * @policy: new policy - * @target_freq: new freq - * @relation: + * @index: index of new freq * * Sets a new CPUFreq policy/freq. 
  */
-static int speedstep_target(struct cpufreq_policy *policy,
-			unsigned int target_freq, unsigned int relation)
+static int speedstep_target(struct cpufreq_policy *policy, unsigned int index)
 {
-	unsigned int newstate = 0;
-	struct cpufreq_freqs freqs;
-
-	if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0],
-				target_freq, relation, &newstate))
-		return -EINVAL;
-
-	freqs.old = speedstep_freqs[speedstep_get_state()].frequency;
-	freqs.new = speedstep_freqs[newstate].frequency;
-
-	if (freqs.old == freqs.new)
-		return 0;
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-	speedstep_set_state(newstate);
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+	speedstep_set_state(index);
 
 	return 0;
 }
 
-/**
- * speedstep_verify - verifies a new CPUFreq policy
- * @policy: new policy
- *
- * Limit must be within speedstep_low_freq and speedstep_high_freq, with
- * at least one border included.
- */
-static int speedstep_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
-}
-
-
 static int speedstep_cpu_init(struct cpufreq_policy *policy)
 {
 	int result;
-	unsigned int speed, state;
 	unsigned int *low, *high;
 
 	/* capability check */
@@ -316,32 +253,8 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
 		pr_debug("workaround worked.\n");
 	}
 
-	/* get current speed setting */
-	state = speedstep_get_state();
-	speed = speedstep_freqs[state].frequency;
-
-	pr_debug("currently at %s speed setting - %i MHz\n",
-		(speed == speedstep_freqs[SPEEDSTEP_LOW].frequency)
-		? "low" : "high",
-		(speed / 1000));
-
-	/* cpuinfo and default policy values */
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-	policy->cur = speed;
-
-	result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
-	if (result)
-		return result;
-
-	cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
-
-	return 0;
-}
-
-static int speedstep_cpu_exit(struct cpufreq_policy *policy)
-{
-	cpufreq_frequency_table_put_attr(policy->cpu);
-	return 0;
+	return cpufreq_table_validate_and_show(policy, speedstep_freqs);
 }
 
 static unsigned int speedstep_get(unsigned int cpu)
@@ -362,20 +275,14 @@ static int speedstep_resume(struct cpufreq_policy *policy)
 	return result;
 }
 
-static struct freq_attr *speedstep_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver speedstep_driver = {
 	.name		= "speedstep-smi",
-	.verify		= speedstep_verify,
-	.target		= speedstep_target,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= speedstep_target,
 	.init		= speedstep_cpu_init,
-	.exit		= speedstep_cpu_exit,
 	.get		= speedstep_get,
 	.resume		= speedstep_resume,
-	.attr		= speedstep_attr,
+	.attr		= cpufreq_generic_attr,
 };
 
 static const struct x86_cpu_id ss_smi_ids[] = {
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c
index a7b876fdc1d..8084c7f7e20 100644
--- a/drivers/cpufreq/tegra-cpufreq.c
+++ b/drivers/cpufreq/tegra-cpufreq.c
@@ -26,7 +26,6 @@
 #include <linux/err.h>
 #include <linux/clk.h>
 #include <linux/io.h>
-#include <linux/suspend.h>
 
 static struct cpufreq_frequency_table freq_table[] = {
 	{ .frequency = 216000 },
@@ -46,74 +45,55 @@ static struct clk *cpu_clk;
 static struct clk *pll_x_clk;
 static struct clk *pll_p_clk;
 static struct clk *emc_clk;
+static bool pll_x_prepared;
 
-static unsigned long target_cpu_speed[NUM_CPUS];
-static DEFINE_MUTEX(tegra_cpu_lock);
-static bool is_suspended;
-
-static int tegra_verify_speed(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
-static unsigned int tegra_getspeed(unsigned int cpu)
+static unsigned int tegra_get_intermediate(struct cpufreq_policy *policy,
+					   unsigned int index)
 {
-	unsigned long rate;
+	unsigned int ifreq = clk_get_rate(pll_p_clk) / 1000;
 
-	if (cpu >= NUM_CPUS)
+	/*
+	 * Don't switch to intermediate freq if:
+	 * - we are already at it, i.e. policy->cur == ifreq
+	 * - index corresponds to ifreq
+	 */
+	if ((freq_table[index].frequency == ifreq) || (policy->cur == ifreq))
 		return 0;
 
-	rate = clk_get_rate(cpu_clk) / 1000;
-	return rate;
+	return ifreq;
 }
 
-static int tegra_cpu_clk_set_rate(unsigned long rate)
+static int tegra_target_intermediate(struct cpufreq_policy *policy,
+				     unsigned int index)
 {
 	int ret;
 
 	/*
 	 * Take an extra reference to the main pll so it doesn't turn
-	 * off when we move the cpu off of it
+	 * off when we move the cpu off of it as enabling it again while we
+	 * switch to it from tegra_target() would take additional time.
+	 *
+	 * When target-freq is equal to intermediate freq we don't need to
+	 * switch to an intermediate freq and so this routine isn't called.
+	 * Also, we wouldn't be using pll_x anymore and must not take extra
+	 * reference to it, as it can be disabled now to save some power.
 	 */
 	clk_prepare_enable(pll_x_clk);
 
 	ret = clk_set_parent(cpu_clk, pll_p_clk);
-	if (ret) {
-		pr_err("Failed to switch cpu to clock pll_p\n");
-		goto out;
-	}
-
-	if (rate == clk_get_rate(pll_p_clk))
-		goto out;
-
-	ret = clk_set_rate(pll_x_clk, rate);
-	if (ret) {
-		pr_err("Failed to change pll_x to %lu\n", rate);
-		goto out;
-	}
-
-	ret = clk_set_parent(cpu_clk, pll_x_clk);
-	if (ret) {
-		pr_err("Failed to switch cpu to clock pll_x\n");
-		goto out;
-	}
+	if (ret)
+		clk_disable_unprepare(pll_x_clk);
+	else
+		pll_x_prepared = true;
 
-out:
-	clk_disable_unprepare(pll_x_clk);
 	return ret;
 }
 
-static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
-		unsigned long rate)
+static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
 {
+	unsigned long rate = freq_table[index].frequency;
+	unsigned int ifreq = clk_get_rate(pll_p_clk) / 1000;
 	int ret = 0;
-	struct cpufreq_freqs freqs;
-
-	freqs.old = tegra_getspeed(0);
-	freqs.new = rate;
-
-	if (freqs.old == freqs.new)
-		return ret;
 
 	/*
 	 * Vote on memory bus frequency based on cpu frequency
@@ -126,131 +106,78 @@ static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
 	else
 		clk_set_rate(emc_clk, 100000000);  /* emc 50Mhz */
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
-#ifdef CONFIG_CPU_FREQ_DEBUG
-	printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n",
-	       freqs.old, freqs.new);
-#endif
-
-	ret = tegra_cpu_clk_set_rate(freqs.new * 1000);
-	if (ret) {
-		pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n",
-			freqs.new);
-		freqs.new = freqs.old;
-	}
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
-	return ret;
-}
-
-static unsigned long tegra_cpu_highest_speed(void)
-{
-	unsigned long rate = 0;
-	int i;
-
-	for_each_online_cpu(i)
-		rate = max(rate, target_cpu_speed[i]);
-	return rate;
-}
+	/*
+	 * target freq == pll_p, don't need to take extra reference to pll_x_clk
+	 * as it isn't used anymore.
+	 */
+	if (rate == ifreq)
+		return clk_set_parent(cpu_clk, pll_p_clk);
 
-static int tegra_target(struct cpufreq_policy *policy,
-		       unsigned int target_freq,
-		       unsigned int relation)
-{
-	unsigned int idx;
-	unsigned int freq;
-	int ret = 0;
+	ret = clk_set_rate(pll_x_clk, rate * 1000);
+	/* Restore to earlier frequency on error, i.e. pll_x */
+	if (ret)
+		pr_err("Failed to change pll_x to %lu\n", rate);
 
-	mutex_lock(&tegra_cpu_lock);
+	ret = clk_set_parent(cpu_clk, pll_x_clk);
+	/* This shouldn't fail while changing or restoring */
+	WARN_ON(ret);
 
-	if (is_suspended) {
-		ret = -EBUSY;
-		goto out;
+	/*
+	 * Drop count to pll_x clock only if we switched to intermediate freq
+	 * earlier while transitioning to a target frequency.
+	 */
+	if (pll_x_prepared) {
+		clk_disable_unprepare(pll_x_clk);
+		pll_x_prepared = false;
 	}
 
-	cpufreq_frequency_table_target(policy, freq_table, target_freq,
-		relation, &idx);
-
-	freq = freq_table[idx].frequency;
-
-	target_cpu_speed[policy->cpu] = freq;
-
-	ret = tegra_update_cpu_speed(policy, tegra_cpu_highest_speed());
-
-out:
-	mutex_unlock(&tegra_cpu_lock);
 	return ret;
 }
 
-static int tegra_pm_notify(struct notifier_block *nb, unsigned long event,
-	void *dummy)
-{
-	mutex_lock(&tegra_cpu_lock);
-	if (event == PM_SUSPEND_PREPARE) {
-		struct cpufreq_policy *policy = cpufreq_cpu_get(0);
-		is_suspended = true;
-		pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n",
-			freq_table[0].frequency);
-		tegra_update_cpu_speed(policy, freq_table[0].frequency);
-		cpufreq_cpu_put(policy);
-	} else if (event == PM_POST_SUSPEND) {
-		is_suspended = false;
-	}
-	mutex_unlock(&tegra_cpu_lock);
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block tegra_cpu_pm_notifier = {
-	.notifier_call = tegra_pm_notify,
-};
-
 static int tegra_cpu_init(struct cpufreq_policy *policy)
 {
+	int ret;
+
 	if (policy->cpu >= NUM_CPUS)
 		return -EINVAL;
 
 	clk_prepare_enable(emc_clk);
 	clk_prepare_enable(cpu_clk);
 
-	cpufreq_frequency_table_cpuinfo(policy, freq_table);
-	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-	policy->cur = tegra_getspeed(policy->cpu);
-	target_cpu_speed[policy->cpu] = policy->cur;
-
-	/* FIXME: what's the actual transition time? */
-	policy->cpuinfo.transition_latency = 300 * 1000;
-
-	cpumask_copy(policy->cpus, cpu_possible_mask);
-
-	if (policy->cpu == 0)
-		register_pm_notifier(&tegra_cpu_pm_notifier);
+	ret = cpufreq_generic_init(policy, freq_table, 300 * 1000);
+	if (ret) {
+		clk_disable_unprepare(cpu_clk);
+		clk_disable_unprepare(emc_clk);
+		return ret;
+	}
 
+	policy->clk = cpu_clk;
+	policy->suspend_freq = freq_table[0].frequency;
 	return 0;
 }
 
 static int tegra_cpu_exit(struct cpufreq_policy *policy)
 {
-	cpufreq_frequency_table_cpuinfo(policy, freq_table);
+	clk_disable_unprepare(cpu_clk);
 	clk_disable_unprepare(emc_clk);
 	return 0;
 }
 
-static struct freq_attr *tegra_cpufreq_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver tegra_cpufreq_driver = {
-	.verify		= tegra_verify_speed,
-	.target		= tegra_target,
-	.get		= tegra_getspeed,
-	.init		= tegra_cpu_init,
-	.exit		= tegra_cpu_exit,
-	.name		= "tegra",
-	.attr		= tegra_cpufreq_attr,
+	.flags			= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+	.verify			= cpufreq_generic_frequency_table_verify,
+	.get_intermediate	= tegra_get_intermediate,
+	.target_intermediate	= tegra_target_intermediate,
+	.target_index		= tegra_target,
+	.get			= cpufreq_generic_get,
+	.init			= tegra_cpu_init,
+	.exit			= tegra_cpu_exit,
+	.name			= "tegra",
+	.attr			= cpufreq_generic_attr,
+#ifdef CONFIG_PM
+	.suspend		= cpufreq_generic_suspend,
+#endif
 };
 
 static int __init tegra_cpufreq_init(void)
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c
index b225f04d8ae..6f9dfa80563 100644
--- a/drivers/cpufreq/unicore2-cpufreq.c
+++ b/drivers/cpufreq/unicore2-cpufreq.c
@@ -11,6 +11,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/init.h>
@@ -29,57 +30,44 @@ static int ucv2_verify_speed(struct cpufreq_policy *policy)
 	if (policy->cpu)
 		return -EINVAL;
 
-	cpufreq_verify_within_limits(policy,
-			policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
-
+	cpufreq_verify_within_cpu_limits(policy);
 	return 0;
 }
 
-static unsigned int ucv2_getspeed(unsigned int cpu)
-{
-	struct clk *mclk = clk_get(NULL, "MAIN_CLK");
-
-	if (cpu)
-		return 0;
-	return clk_get_rate(mclk)/1000;
-}
-
 static int ucv2_target(struct cpufreq_policy *policy,
 			 unsigned int target_freq,
 			 unsigned int relation)
 {
-	unsigned int cur = ucv2_getspeed(0);
 	struct cpufreq_freqs freqs;
-	struct clk *mclk = clk_get(NULL, "MAIN_CLK");
+	int ret;
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+	freqs.old = policy->cur;
+	freqs.new = target_freq;
 
-	if (!clk_set_rate(mclk, target_freq * 1000)) {
-		freqs.old = cur;
-		freqs.new = target_freq;
-	}
+	cpufreq_freq_transition_begin(policy, &freqs);
+	ret = clk_set_rate(policy->clk, target_freq * 1000);
+	cpufreq_freq_transition_end(policy, &freqs, ret);
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
-	return 0;
+	return ret;
 }
 
 static int __init ucv2_cpu_init(struct cpufreq_policy *policy)
 {
 	if (policy->cpu != 0)
 		return -EINVAL;
-	policy->cur = ucv2_getspeed(0);
+
 	policy->min = policy->cpuinfo.min_freq = 250000;
 	policy->max = policy->cpuinfo.max_freq = 1000000;
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-	return 0;
+	policy->clk = clk_get(NULL, "MAIN_CLK");
+	return PTR_ERR_OR_ZERO(policy->clk);
 }
 
 static struct cpufreq_driver ucv2_driver = {
 	.flags		= CPUFREQ_STICKY,
 	.verify		= ucv2_verify_speed,
 	.target		= ucv2_target,
-	.get		= ucv2_getspeed,
+	.get		= cpufreq_generic_get,
 	.init		= ucv2_cpu_init,
 	.name		= "UniCore-II",
 };
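
A note on the tegra hunks above: this is the only driver in the set that gains the two-step intermediate-frequency callbacks, and it is the core, not the driver, that decides when to take the detour through pll_p. The sketch below shows roughly how the core is expected to sequence the two hooks around ->target_index(); it is a simplified illustration of the flow, not the actual __target_index() code in drivers/cpufreq/cpufreq.c, which also handles transition notifiers and error unwinding. The function name sketch_switch_to() is invented for this example.

/*
 * Simplified sketch: how the core sequences get_intermediate /
 * target_intermediate / target_index for a driver like tegra-cpufreq.
 */
static int sketch_switch_to(struct cpufreq_driver *drv,
			    struct cpufreq_policy *policy, unsigned int index)
{
	unsigned int ifreq = 0;
	int ret;

	if (drv->get_intermediate) {
		/* a return of 0 means "no intermediate step needed" */
		ifreq = drv->get_intermediate(policy, index);
		if (ifreq) {
			ret = drv->target_intermediate(policy, index);
			if (ret)
				return ret;
		}
	}

	/*
	 * Final switch; for tegra this re-parents the CPU clock to pll_x
	 * and drops the extra pll_x reference that was taken in
	 * tegra_target_intermediate().
	 */
	return drv->target_index(policy, index);
}
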
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
new file mode 100644
index 00000000000..7f7c9c01b44
--- /dev/null
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -0,0 +1,70 @@
+/*
+ * Versatile Express SPC CPUFreq Interface driver
+ *
+ * It provides necessary ops to arm_big_little cpufreq driver.
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/types.h>
+
+#include "arm_big_little.h"
+
+static int ve_spc_init_opp_table(struct device *cpu_dev)
+{
+	/*
+	 * platform specific SPC code must initialise the opp table
+	 * so just check if the OPP count is non-zero
+	 */
+	return dev_pm_opp_get_opp_count(cpu_dev) <= 0;
+}
+
+static int ve_spc_get_transition_latency(struct device *cpu_dev)
+{
+	return 1000000; /* 1 ms */
+}
+
+static struct cpufreq_arm_bL_ops ve_spc_cpufreq_ops = {
+	.name	= "vexpress-spc",
+	.get_transition_latency = ve_spc_get_transition_latency,
+	.init_opp_table = ve_spc_init_opp_table,
+};
+
+static int ve_spc_cpufreq_probe(struct platform_device *pdev)
+{
+	return bL_cpufreq_register(&ve_spc_cpufreq_ops);
+}
+
+static int ve_spc_cpufreq_remove(struct platform_device *pdev)
+{
+	bL_cpufreq_unregister(&ve_spc_cpufreq_ops);
+	return 0;
+}
+
+static struct platform_driver ve_spc_cpufreq_platdrv = {
+	.driver = {
+		.name	= "vexpress-spc-cpufreq",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= ve_spc_cpufreq_probe,
+	.remove		= ve_spc_cpufreq_remove,
+};
+module_platform_driver(ve_spc_cpufreq_platdrv);
+
+MODULE_LICENSE("GPL");
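
Taken together, the per-driver conversions above all converge on the same skeleton: a frequency table plus the generic cpufreq helpers, with the old hand-rolled verify/attr/notification code deleted. Below is a minimal sketch of that post-conversion shape. The driver name, clock name, and frequencies are invented for illustration; only the cpufreq helper functions named here are real API from this series.

#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/module.h>

static struct cpufreq_frequency_table foo_freqs[] = {
	{ .frequency = 250000 },		/* kHz */
	{ .frequency = 500000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

/* The core validated 'index' already; no relation/table lookup needed. */
static int foo_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	return clk_set_rate(policy->clk, foo_freqs[index].frequency * 1000);
}

static int foo_cpu_init(struct cpufreq_policy *policy)
{
	policy->clk = clk_get(NULL, "foo_cpu_clk");	/* hypothetical clock */
	if (IS_ERR(policy->clk))
		return PTR_ERR(policy->clk);

	/* Validates the table and fills cpuinfo limits + latency. */
	return cpufreq_generic_init(policy, foo_freqs, 100000 /* 100 us */);
}

static struct cpufreq_driver foo_driver = {
	.name		= "foo-cpufreq",
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= foo_target_index,
	.get		= cpufreq_generic_get,	/* reads policy->clk rate */
	.init		= foo_cpu_init,
	.attr		= cpufreq_generic_attr,	/* scaling_available_frequencies */
};

static int __init foo_cpufreq_init(void)
{
	return cpufreq_register_driver(&foo_driver);
}
module_init(foo_cpufreq_init);
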

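Finally, the tegra hunks trade the removed PM-notifier machinery for the core's suspend path: init records a safe rate in policy->suspend_freq and the driver points .suspend at cpufreq_generic_suspend. Roughly, that helper behaves like the sketch below; this is a simplified rendering of the helper in drivers/cpufreq/cpufreq.c, not its exact code.

/*
 * Simplified sketch of cpufreq_generic_suspend(): on system suspend the
 * core pins the policy to the driver-chosen safe frequency (tegra sets
 * policy->suspend_freq = freq_table[0].frequency, its lowest rate).
 */
static int sketch_generic_suspend(struct cpufreq_policy *policy)
{
	int ret;

	if (!policy->suspend_freq)
		return 0;	/* driver opted out; nothing to do */

	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
				      CPUFREQ_RELATION_H);
	if (ret)
		pr_err("failed to set suspend freq %u: %d\n",
		       policy->suspend_freq, ret);

	return ret;
}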