Diffstat (limited to 'drivers/cpufreq')
86 files changed, 8447 insertions, 5149 deletions
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 534fcb82515..ffe350f86bc 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -17,15 +17,15 @@ config CPU_FREQ if CPU_FREQ -config CPU_FREQ_TABLE - tristate - config CPU_FREQ_GOV_COMMON bool +config CPU_FREQ_BOOST_SW + bool + depends on THERMAL + config CPU_FREQ_STAT tristate "CPU frequency translation statistics" - select CPU_FREQ_TABLE default y help This driver exports CPU frequency statistics information through sysfs @@ -143,7 +143,6 @@ config CPU_FREQ_GOV_USERSPACE config CPU_FREQ_GOV_ONDEMAND tristate "'ondemand' cpufreq policy governor" - select CPU_FREQ_TABLE select CPU_FREQ_GOV_COMMON help 'ondemand' - This driver adds a dynamic cpufreq policy governor. @@ -186,8 +185,10 @@ config CPU_FREQ_GOV_CONSERVATIVE config GENERIC_CPUFREQ_CPU0 tristate "Generic CPU0 cpufreq driver" - depends on HAVE_CLK && REGULATOR && PM_OPP && OF - select CPU_FREQ_TABLE + depends on HAVE_CLK && OF + # if CPU_THERMAL is on and THERMAL=m, CPU0 cannot be =y: + depends on !CPU_THERMAL || THERMAL + select PM_OPP help This adds a generic cpufreq driver for CPU0 frequency management. It supports both uniprocessor (UP) and symmetric multiprocessor (SMP) @@ -201,7 +202,7 @@ source "drivers/cpufreq/Kconfig.x86" endmenu menu "ARM CPU frequency scaling drivers" -depends on ARM +depends on ARM || ARM64 source "drivers/cpufreq/Kconfig.arm" endmenu @@ -223,7 +224,6 @@ depends on IA64 config IA64_ACPI_CPUFREQ tristate "ACPI Processor P-States driver" - select CPU_FREQ_TABLE depends on ACPI_PROCESSOR help This driver adds a CPUFreq driver which utilizes the ACPI @@ -240,7 +240,6 @@ depends on MIPS config LOONGSON2_CPUFREQ tristate "Loongson2 CPUFreq Driver" - select CPU_FREQ_TABLE help This option adds a CPUFreq driver for loongson processors which support software configurable cpu frequency. @@ -262,7 +261,6 @@ menu "SPARC CPU frequency scaling drivers" depends on SPARC64 config SPARC_US3_CPUFREQ tristate "UltraSPARC-III CPU Frequency driver" - select CPU_FREQ_TABLE help This adds the CPUFreq driver for UltraSPARC-III processors. @@ -272,7 +270,6 @@ config SPARC_US3_CPUFREQ config SPARC_US2E_CPUFREQ tristate "UltraSPARC-IIe CPU Frequency driver" - select CPU_FREQ_TABLE help This adds the CPUFreq driver for UltraSPARC-IIe processors. @@ -285,7 +282,6 @@ menu "SH CPU Frequency scaling" depends on SUPERH config SH_CPU_FREQ tristate "SuperH CPU Frequency driver" - select CPU_FREQ_TABLE help This adds the cpufreq driver for SuperH. Any CPU that supports clock rate rounding through the clock framework can use this diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 6e57543fe0b..7364a538e05 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -2,9 +2,11 @@ # ARM CPU Frequency scaling drivers # +# big LITTLE core layer and glue drivers config ARM_BIG_LITTLE_CPUFREQ tristate "Generic ARM big LITTLE CPUfreq driver" - depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK + depends on ARM && BIG_LITTLE && ARM_CPU_TOPOLOGY && HAVE_CLK + select PM_OPP help This enables the Generic CPUfreq driver for ARM big.LITTLE platforms. @@ -15,51 +17,82 @@ config ARM_DT_BL_CPUFREQ This enables probing via DT for Generic CPUfreq driver for ARM big.LITTLE platform. This gets frequency tables from DT. -config ARM_EXYNOS_CPUFREQ - bool "SAMSUNG EXYNOS SoCs" - depends on ARCH_EXYNOS - default y - help - This adds the CPUFreq driver common part for Samsung - EXYNOS SoCs. 
+config ARM_VEXPRESS_SPC_CPUFREQ + tristate "Versatile Express SPC based CPUfreq driver" + depends on ARM_BIG_LITTLE_CPUFREQ && ARCH_VEXPRESS_SPC + help + This add the CPUfreq driver support for Versatile Express + big.LITTLE platforms using SPC for power management. - If in doubt, say N. + +config ARM_EXYNOS_CPUFREQ + bool config ARM_EXYNOS4210_CPUFREQ - def_bool CPU_EXYNOS4210 + bool "SAMSUNG EXYNOS4210" + depends on CPU_EXYNOS4210 + default y + select ARM_EXYNOS_CPUFREQ help This adds the CPUFreq driver for Samsung EXYNOS4210 SoC (S5PV310 or S5PC210). + If in doubt, say N. + config ARM_EXYNOS4X12_CPUFREQ - def_bool (SOC_EXYNOS4212 || SOC_EXYNOS4412) + bool "SAMSUNG EXYNOS4x12" + depends on SOC_EXYNOS4212 || SOC_EXYNOS4412 + default y + select ARM_EXYNOS_CPUFREQ help This adds the CPUFreq driver for Samsung EXYNOS4X12 SoC (EXYNOS4212 or EXYNOS4412). + If in doubt, say N. + config ARM_EXYNOS5250_CPUFREQ - def_bool SOC_EXYNOS5250 + bool "SAMSUNG EXYNOS5250" + depends on SOC_EXYNOS5250 + default y + select ARM_EXYNOS_CPUFREQ help This adds the CPUFreq driver for Samsung EXYNOS5250 SoC. + If in doubt, say N. + config ARM_EXYNOS5440_CPUFREQ - def_bool SOC_EXYNOS5440 - depends on HAVE_CLK && PM_OPP && OF + bool "SAMSUNG EXYNOS5440" + depends on SOC_EXYNOS5440 + depends on HAVE_CLK && OF + select PM_OPP + default y help This adds the CPUFreq driver for Samsung EXYNOS5440 SoC. The nature of exynos5440 clock controller is different than previous exynos controllers so not using the common exynos framework. + If in doubt, say N. + +config ARM_EXYNOS_CPU_FREQ_BOOST_SW + bool "EXYNOS Frequency Overclocking - Software" + depends on ARM_EXYNOS_CPUFREQ + select CPU_FREQ_BOOST_SW + select EXYNOS_THERMAL + help + This driver supports software managed overclocking (BOOST). + It allows usage of special frequencies for Samsung Exynos + processors if thermal conditions are appropriate. + + It requires, for safe operation, thermal framework with properly + defined trip points. + + If in doubt, say N. + config ARM_HIGHBANK_CPUFREQ tristate "Calxeda Highbank-based" - depends on ARCH_HIGHBANK - select CPU_FREQ_TABLE - select GENERIC_CPUFREQ_CPU0 - select PM_OPP - select REGULATOR - + depends on ARCH_HIGHBANK && GENERIC_CPUFREQ_CPU0 && REGULATOR default m help This adds the CPUFreq driver for Calxeda Highbank SoC @@ -68,11 +101,12 @@ config ARM_HIGHBANK_CPUFREQ If in doubt, say N. config ARM_IMX6Q_CPUFREQ - tristate "Freescale i.MX6Q cpufreq support" - depends on SOC_IMX6Q + tristate "Freescale i.MX6 cpufreq support" + depends on ARCH_MXC depends on REGULATOR_ANATOP + select PM_OPP help - This adds cpufreq driver support for Freescale i.MX6Q SOC. + This adds cpufreq driver support for Freescale i.MX6 series SoCs. If in doubt, say N. @@ -85,7 +119,7 @@ config ARM_INTEGRATOR If in doubt, say Y. config ARM_KIRKWOOD_CPUFREQ - def_bool ARCH_KIRKWOOD && OF + def_bool ARCH_KIRKWOOD || MACH_KIRKWOOD help This adds the CPUFreq driver for Marvell Kirkwood SoCs. @@ -94,7 +128,56 @@ config ARM_OMAP2PLUS_CPUFREQ bool "TI OMAP2+" depends on ARCH_OMAP2PLUS default ARCH_OMAP2PLUS - select CPU_FREQ_TABLE + +config ARM_S3C_CPUFREQ + bool + help + Internal configuration node for common cpufreq on Samsung SoC + +config ARM_S3C24XX_CPUFREQ + bool "CPUfreq driver for Samsung S3C24XX series CPUs (EXPERIMENTAL)" + depends on ARCH_S3C24XX + select ARM_S3C_CPUFREQ + help + This enables the CPUfreq driver for the Samsung S3C24XX family + of CPUs. + + For details, take a look at <file:Documentation/cpu-freq>. + + If in doubt, say N. 
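The ARM_EXYNOS_CPU_FREQ_BOOST_SW entry above plugs into the new core boost machinery (CPU_FREQ_BOOST_SW, gated on THERMAL). As a rough sketch of how a driver participates — illustrative, not code from this series, and assuming the cpufreq_frequency_table "flags" field introduced alongside the core boost support — boost-only operating points are tagged in the frequency table and only become usable while boost is enabled:

	#include <linux/cpufreq.h>

	/* hypothetical table; frequencies in kHz */
	static struct cpufreq_frequency_table sketch_freq_table[] = {
		/* exposed only while boost is on and thermal headroom allows */
		{ .flags = CPUFREQ_BOOST_FREQ, .frequency = 1600000 },
		{ .frequency = 1400000 },
		{ .frequency = 1000000 },
		{ .frequency = CPUFREQ_TABLE_END },
	};

The thermal-framework requirement in the help text is what makes such over-spec frequencies safe to advertise: properly defined trip points throttle the CPU back before it overheats.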
+ +config ARM_S3C24XX_CPUFREQ_DEBUG + bool "Debug CPUfreq Samsung driver core" + depends on ARM_S3C24XX_CPUFREQ + help + Enable s3c_freq_dbg for the Samsung S3C CPUfreq core + +config ARM_S3C24XX_CPUFREQ_IODEBUG + bool "Debug CPUfreq Samsung driver IO timing" + depends on ARM_S3C24XX_CPUFREQ + help + Enable s3c_freq_iodbg for the Samsung S3C CPUfreq core + +config ARM_S3C24XX_CPUFREQ_DEBUGFS + bool "Export debugfs for CPUFreq" + depends on ARM_S3C24XX_CPUFREQ && DEBUG_FS + help + Export status information via debugfs. + +config ARM_S3C2410_CPUFREQ + bool + depends on ARM_S3C24XX_CPUFREQ && CPU_S3C2410 + select S3C2410_CPUFREQ_UTILS + help + CPU Frequency scaling support for S3C2410 + +config ARM_S3C2412_CPUFREQ + bool + depends on ARM_S3C24XX_CPUFREQ && CPU_S3C2412 + default y + select S3C2412_IOTIMING + help + CPU Frequency scaling support for S3C2412 and S3C2413 SoC CPUs. config ARM_S3C2416_CPUFREQ bool "S3C2416 CPU Frequency scaling support" @@ -104,7 +187,7 @@ config ARM_S3C2416_CPUFREQ S3C2450 SoC. The S3C2416 supports changing the rate of the armdiv clock source and also entering a so called dynamic voltage scaling mode in which it is possible to reduce the - core voltage of the cpu. + core voltage of the CPU. If in doubt, say N. @@ -118,6 +201,14 @@ config ARM_S3C2416_CPUFREQ_VCORESCALE If in doubt, say N. +config ARM_S3C2440_CPUFREQ + bool "S3C2440/S3C2442 CPU Frequency scaling support" + depends on ARM_S3C24XX_CPUFREQ && (CPU_S3C2440 || CPU_S3C2442) + select S3C2410_CPUFREQ_UTILS + default y + help + CPU Frequency scaling support for S3C2440 and S3C2442 SoC CPUs. + config ARM_S3C64XX_CPUFREQ bool "Samsung S3C64XX" depends on CPU_S3C6410 @@ -130,7 +221,6 @@ config ARM_S3C64XX_CPUFREQ config ARM_S5PV210_CPUFREQ bool "Samsung S5PV210 and S5PC110" depends on CPU_S5PV210 - select CPU_FREQ_TABLE default y help This adds the CPUFreq driver for Samsung S5PV210 and @@ -150,3 +240,10 @@ config ARM_SPEAR_CPUFREQ default y help This adds the CPUFreq driver support for SPEAr SOCs. + +config ARM_TEGRA_CPUFREQ + bool "TEGRA CPUFreq support" + depends on ARCH_TEGRA + default y + help + This adds the CPUFreq driver support for TEGRA SOCs. diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc index 9c926ca0d71..72564b701b4 100644 --- a/drivers/cpufreq/Kconfig.powerpc +++ b/drivers/cpufreq/Kconfig.powerpc @@ -19,7 +19,46 @@ config CPU_FREQ_CBE_PMI config CPU_FREQ_MAPLE bool "Support for Maple 970FX Evaluation Board" depends on PPC_MAPLE - select CPU_FREQ_TABLE help This adds support for frequency switching on Maple 970FX Evaluation Board and compatible boards (IBM JS2x blades). + +config PPC_CORENET_CPUFREQ + tristate "CPU frequency scaling driver for Freescale E500MC SoCs" + depends on PPC_E500MC && OF && COMMON_CLK + select CLK_PPC_CORENET + help + This adds the CPUFreq driver support for Freescale e500mc, + e5500 and e6500 series SoCs which are capable of changing + the CPU's frequency dynamically. + +config CPU_FREQ_PMAC + bool "Support for Apple PowerBooks" + depends on ADB_PMU && PPC32 + help + This adds support for frequency switching on Apple PowerBooks, + this currently includes some models of iBook & Titanium + PowerBook. + +config CPU_FREQ_PMAC64 + bool "Support for some Apple G5s" + depends on PPC_PMAC && PPC64 + help + This adds support for frequency switching on Apple iMac G5, + and some of the more recent desktop G5 machines as well. 
+ +config PPC_PASEMI_CPUFREQ + bool "Support for PA Semi PWRficient" + depends on PPC_PASEMI + default y + help + This adds the support for frequency switching on PA Semi + PWRficient processors. + +config POWERNV_CPUFREQ + tristate "CPU frequency scaling for IBM POWERNV platform" + depends on PPC_POWERNV + default y + help + This adds support for CPU frequency switching on IBM POWERNV + platform diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86 index 6bd63d63d35..89ae88f9189 100644 --- a/drivers/cpufreq/Kconfig.x86 +++ b/drivers/cpufreq/Kconfig.x86 @@ -10,7 +10,7 @@ config X86_INTEL_PSTATE The driver implements an internal governor and will become the scaling driver and governor for Sandy bridge processors. - When this driver is enabled it will become the perferred + When this driver is enabled it will become the preferred scaling driver for Sandy bridge processors. If in doubt, say N. @@ -31,7 +31,6 @@ config X86_PCC_CPUFREQ config X86_ACPI_CPUFREQ tristate "ACPI Processor P-States driver" - select CPU_FREQ_TABLE depends on ACPI_PROCESSOR help This driver adds a CPUFreq driver which utilizes the ACPI @@ -53,14 +52,13 @@ config X86_ACPI_CPUFREQ_CPB help The powernow-k8 driver used to provide a sysfs knob called "cpb" to disable the Core Performance Boosting feature of AMD CPUs. This - file has now been superseeded by the more generic "boost" entry. + file has now been superseded by the more generic "boost" entry. By enabling this option the acpi_cpufreq driver provides the old entry in addition to the new boost ones, for compatibility reasons. config ELAN_CPUFREQ tristate "AMD Elan SC400 and SC410" - select CPU_FREQ_TABLE depends on MELAN ---help--- This adds the CPUFreq driver for AMD Elan SC400 and SC410 @@ -76,7 +74,6 @@ config ELAN_CPUFREQ config SC520_CPUFREQ tristate "AMD Elan SC520" - select CPU_FREQ_TABLE depends on MELAN ---help--- This adds the CPUFreq driver for AMD Elan SC520 processor. @@ -88,7 +85,6 @@ config SC520_CPUFREQ config X86_POWERNOW_K6 tristate "AMD Mobile K6-2/K6-3 PowerNow!" - select CPU_FREQ_TABLE depends on X86_32 help This adds the CPUFreq driver for mobile AMD K6-2+ and mobile @@ -100,7 +96,6 @@ config X86_POWERNOW_K6 config X86_POWERNOW_K7 tristate "AMD Mobile Athlon/Duron PowerNow!" - select CPU_FREQ_TABLE depends on X86_32 help This adds the CPUFreq driver for mobile AMD K7 mobile processors. @@ -118,7 +113,6 @@ config X86_POWERNOW_K7_ACPI config X86_POWERNOW_K8 tristate "AMD Opteron/Athlon64 PowerNow!" - select CPU_FREQ_TABLE depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ help This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors. @@ -135,7 +129,7 @@ config X86_AMD_FREQ_SENSITIVITY help This adds AMD-specific powersave bias function to the ondemand governor, which allows it to make more power-conscious frequency - change decisions based on feedback from hardware (availble on AMD + change decisions based on feedback from hardware (available on AMD Family 16h and above). 
Hardware feedback tells software how "sensitive" to frequency changes @@ -159,7 +153,6 @@ config X86_GX_SUSPMOD config X86_SPEEDSTEP_CENTRINO tristate "Intel Enhanced SpeedStep (deprecated)" - select CPU_FREQ_TABLE select X86_SPEEDSTEP_CENTRINO_TABLE if X86_32 depends on X86_32 || (X86_64 && ACPI_PROCESSOR) help @@ -189,7 +182,6 @@ config X86_SPEEDSTEP_CENTRINO_TABLE config X86_SPEEDSTEP_ICH tristate "Intel Speedstep on ICH-M chipsets (ioport interface)" - select CPU_FREQ_TABLE depends on X86_32 help This adds the CPUFreq driver for certain mobile Intel Pentium III @@ -203,7 +195,6 @@ config X86_SPEEDSTEP_ICH config X86_SPEEDSTEP_SMI tristate "Intel SpeedStep on 440BX/ZX/MX chipsets (SMI interface)" - select CPU_FREQ_TABLE depends on X86_32 help This adds the CPUFreq driver for certain mobile Intel Pentium III @@ -216,7 +207,6 @@ config X86_SPEEDSTEP_SMI config X86_P4_CLOCKMOD tristate "Intel Pentium 4 clock modulation" - select CPU_FREQ_TABLE help This adds the CPUFreq driver for Intel Pentium 4 / XEON processors. When enabled it will lower CPU temperature by skipping @@ -258,7 +248,6 @@ config X86_LONGRUN config X86_LONGHAUL tristate "VIA Cyrix III Longhaul" - select CPU_FREQ_TABLE depends on X86_32 && ACPI_PROCESSOR help This adds the CPUFreq driver for VIA Samuel/CyrixIII, @@ -271,7 +260,6 @@ config X86_LONGHAUL config X86_E_POWERSAVER tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)" - select CPU_FREQ_TABLE depends on X86_32 && ACPI_PROCESSOR help This adds the CPUFreq driver for VIA C7 processors. However, this driver diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 315b9231feb..db6d9a2fea4 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -1,5 +1,7 @@ # CPUfreq core -obj-$(CONFIG_CPU_FREQ) += cpufreq.o +obj-$(CONFIG_CPU_FREQ) += cpufreq.o freq_table.o +obj-$(CONFIG_PM_OPP) += cpufreq_opp.o + # CPUfreq stats obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o @@ -11,9 +13,6 @@ obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o -# CPUfreq cross-arch helpers -obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o - obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o ################################################################################## @@ -23,7 +22,7 @@ obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o # powernow-k8 can load then. ACPI is preferred to all other hardware-specific drivers. # speedstep-* is preferred over p4-clockmod. -obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o +obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o @@ -50,7 +49,7 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o # LITTLE drivers, so that it is probed last. 
obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o -obj-$(CONFIG_ARCH_DAVINCI_DA850) += davinci-cpufreq.o +obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o @@ -65,13 +64,19 @@ obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o obj-$(CONFIG_PXA25x) += pxa2xx-cpufreq.o obj-$(CONFIG_PXA27x) += pxa2xx-cpufreq.o obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o +obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o +obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o +obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o +obj-$(CONFIG_ARM_S3C2412_CPUFREQ) += s3c2412-cpufreq.o obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o +obj-$(CONFIG_ARM_S3C2440_CPUFREQ) += s3c2440-cpufreq.o obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o -obj-$(CONFIG_ARCH_TEGRA) += tegra-cpufreq.o +obj-$(CONFIG_ARM_TEGRA_CPUFREQ) += tegra-cpufreq.o +obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o ################################################################################## # PowerPC platform drivers @@ -79,11 +84,16 @@ obj-$(CONFIG_CPU_FREQ_CBE) += ppc-cbe-cpufreq.o ppc-cbe-cpufreq-y += ppc_cbe_cpufreq_pervasive.o ppc_cbe_cpufreq.o obj-$(CONFIG_CPU_FREQ_CBE_PMI) += ppc_cbe_cpufreq_pmi.o obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o +obj-$(CONFIG_PPC_CORENET_CPUFREQ) += ppc-corenet-cpufreq.o +obj-$(CONFIG_CPU_FREQ_PMAC) += pmac32-cpufreq.o +obj-$(CONFIG_CPU_FREQ_PMAC64) += pmac64-cpufreq.o +obj-$(CONFIG_PPC_PASEMI_CPUFREQ) += pasemi-cpufreq.o +obj-$(CONFIG_POWERNV_CPUFREQ) += powernv-cpufreq.o ################################################################################## # Other platform drivers obj-$(CONFIG_AVR32_AT32AP_CPUFREQ) += at32ap-cpufreq.o -obj-$(CONFIG_BLACKFIN) += blackfin-cpufreq.o +obj-$(CONFIG_BFIN_CPU_FREQ) += blackfin-cpufreq.o obj-$(CONFIG_CRIS_MACH_ARTPEC3) += cris-artpec3-cpufreq.o obj-$(CONFIG_ETRAXFS) += cris-etraxfs-cpufreq.o obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index edc089e9d0c..b0c18ed8d83 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -45,7 +45,6 @@ #include <asm/msr.h> #include <asm/processor.h> #include <asm/cpufeature.h> -#include "mperf.h" MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); MODULE_DESCRIPTION("ACPI Processor P-States Driver"); @@ -70,6 +69,7 @@ struct acpi_cpufreq_data { struct cpufreq_frequency_table *freq_table; unsigned int resume; unsigned int cpu_feature; + cpumask_var_t freqdomain_cpus; }; static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data); @@ -80,7 +80,6 @@ static struct acpi_processor_performance __percpu *acpi_perf_data; static struct cpufreq_driver acpi_cpufreq_driver; static unsigned int acpi_pstate_strict; -static bool boost_enabled, boost_supported; static struct msr __percpu *msrs; static bool boost_state(unsigned int cpu) @@ -133,62 +132,55 @@ static void boost_set_msrs(bool enable, const struct cpumask *cpumask) wrmsr_on_cpus(cpumask, msr_addr, msrs); } -static ssize_t _store_boost(const char *buf, size_t count) +static int _store_boost(int val) { - int ret; - unsigned long val = 0; - - if 
(!boost_supported) - return -EINVAL; - - ret = kstrtoul(buf, 10, &val); - if (ret || (val > 1)) - return -EINVAL; - - if ((val && boost_enabled) || (!val && !boost_enabled)) - return count; - get_online_cpus(); - boost_set_msrs(val, cpu_online_mask); - put_online_cpus(); - - boost_enabled = val; pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis"); - return count; + return 0; } -static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr, - const char *buf, size_t count) +static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf) { - return _store_boost(buf, count); -} + struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); -static ssize_t show_global_boost(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", boost_enabled); + return cpufreq_show_cpus(data->freqdomain_cpus, buf); } -static struct global_attr global_boost = __ATTR(boost, 0644, - show_global_boost, - store_global_boost); +cpufreq_freq_attr_ro(freqdomain_cpus); #ifdef CONFIG_X86_ACPI_CPUFREQ_CPB +static ssize_t store_boost(const char *buf, size_t count) +{ + int ret; + unsigned long val = 0; + + if (!acpi_cpufreq_driver.boost_supported) + return -EINVAL; + + ret = kstrtoul(buf, 10, &val); + if (ret || (val > 1)) + return -EINVAL; + + _store_boost((int) val); + + return count; +} + static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf, size_t count) { - return _store_boost(buf, count); + return store_boost(buf, count); } static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf) { - return sprintf(buf, "%u\n", boost_enabled); + return sprintf(buf, "%u\n", acpi_cpufreq_driver.boost_enabled); } -static struct freq_attr cpb = __ATTR(cpb, 0644, show_cpb, store_cpb); +cpufreq_freq_attr_rw(cpb); #endif static int check_est_cpu(unsigned int cpuid) @@ -221,7 +213,7 @@ static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data) static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data) { - int i; + struct cpufreq_frequency_table *pos; struct acpi_processor_performance *perf; if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) @@ -231,10 +223,9 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data) perf = data->acpi_data; - for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { - if (msr == perf->states[data->freq_table[i].index].status) - return data->freq_table[i].frequency; - } + cpufreq_for_each_entry(pos, data->freq_table) + if (msr == perf->states[pos->driver_data].status) + return pos->frequency; return data->freq_table[0].frequency; } @@ -415,34 +406,21 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq, } static int acpi_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) + unsigned int index) { struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); struct acpi_processor_performance *perf; - struct cpufreq_freqs freqs; struct drv_cmd cmd; - unsigned int next_state = 0; /* Index into freq_table */ unsigned int next_perf_state = 0; /* Index into perf table */ int result = 0; - pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); - if (unlikely(data == NULL || data->acpi_data == NULL || data->freq_table == NULL)) { return -ENODEV; } perf = data->acpi_data; - result = cpufreq_frequency_table_target(policy, - data->freq_table, - target_freq, - relation, &next_state); - if (unlikely(result)) { - result = -ENODEV; - goto out; - } - - next_perf_state = 
data->freq_table[next_state].index; + next_perf_state = data->freq_table[index].driver_data; if (perf->state == next_perf_state) { if (unlikely(data->resume)) { pr_debug("Called after resume, resetting to P%d\n", @@ -483,37 +461,24 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, else cmd.mask = cpumask_of(policy->cpu); - freqs.old = perf->states[perf->state].core_frequency * 1000; - freqs.new = data->freq_table[next_state].frequency; - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - drv_write(&cmd); if (acpi_pstate_strict) { - if (!check_freqs(cmd.mask, freqs.new, data)) { + if (!check_freqs(cmd.mask, data->freq_table[index].frequency, + data)) { pr_debug("acpi_cpufreq_target failed (%d)\n", policy->cpu); result = -EAGAIN; - goto out; } } - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - perf->state = next_perf_state; + if (!result) + perf->state = next_perf_state; out: return result; } -static int acpi_cpufreq_verify(struct cpufreq_policy *policy) -{ - struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); - - pr_debug("acpi_cpufreq_verify\n"); - - return cpufreq_frequency_table_verify(policy, data->freq_table); -} - static unsigned long acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu) { @@ -571,7 +536,7 @@ static int boost_notify(struct notifier_block *nb, unsigned long action, switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: - boost_set_msrs(boost_enabled, cpumask); + boost_set_msrs(acpi_cpufreq_driver.boost_enabled, cpumask); break; case CPU_DOWN_PREPARE: @@ -698,10 +663,15 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) return blacklisted; #endif - data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL); + data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; + if (!zalloc_cpumask_var(&data->freqdomain_cpus, GFP_KERNEL)) { + result = -ENOMEM; + goto err_free; + } + data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu); per_cpu(acfreq_data, cpu) = data; @@ -710,7 +680,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) result = acpi_processor_register_performance(data->acpi_data, cpu); if (result) - goto err_free; + goto err_free_mask; perf = data->acpi_data; policy->shared_type = perf->shared_type; @@ -723,6 +693,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { cpumask_copy(policy->cpus, perf->shared_cpu_map); } + cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map); #ifdef CONFIG_SMP dmi_check_system(sw_any_bug_dmi_table); @@ -734,6 +705,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) { cpumask_clear(policy->cpus); cpumask_set_cpu(cpu, policy->cpus); + cpumask_copy(data->freqdomain_cpus, cpu_sibling_mask(cpu)); policy->shared_type = CPUFREQ_SHARED_TYPE_HW; pr_info_once(PFX "overriding BIOS provided _PSD data\n"); } @@ -781,7 +753,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) goto err_unreg; } - data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * + data->freq_table = kzalloc(sizeof(*data->freq_table) * (perf->state_count+1), GFP_KERNEL); if (!data->freq_table) { result = -ENOMEM; @@ -811,7 +783,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) data->freq_table[valid_states-1].frequency / 1000) continue; - data->freq_table[valid_states].index = i; + data->freq_table[valid_states].driver_data = i; 
data->freq_table[valid_states].frequency = perf->states[i].core_frequency * 1000; valid_states++; @@ -819,7 +791,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END; perf->state = 0; - result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); + result = cpufreq_table_validate_and_show(policy, data->freq_table); if (result) goto err_freqfree; @@ -828,12 +800,16 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) switch (perf->control_register.space_id) { case ACPI_ADR_SPACE_SYSTEM_IO: - /* Current speed is unknown and not detectable by IO port */ + /* + * The core will not set policy->cur, because + * cpufreq_driver->get is NULL, so we need to set it here. + * However, we have to guess it, because the current speed is + * unknown and not detectable via IO ports. + */ policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu); break; case ACPI_ADR_SPACE_FIXED_HARDWARE: acpi_cpufreq_driver.get = get_cur_freq_on_cpu; - policy->cur = get_cur_freq_on_cpu(cpu); break; default: break; @@ -842,10 +818,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) /* notify BIOS that we exist */ acpi_processor_notify_smm(THIS_MODULE); - /* Check for APERF/MPERF support in hardware */ - if (boot_cpu_has(X86_FEATURE_APERFMPERF)) - acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf; - pr_debug("CPU%u - ACPI performance management activated.\n", cpu); for (i = 0; i < perf->state_count; i++) pr_debug(" %cP%d: %d MHz, %d mW, %d uS\n", @@ -854,8 +826,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) (u32) perf->states[i].power, (u32) perf->states[i].transition_latency); - cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu); - /* * the first call to ->target() should result in us actually * writing something to the appropriate registers. 
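The churn in acpi_cpufreq_target() above is the ->target_index() conversion in miniature: the lookup via cpufreq_frequency_table_target(), the cpufreq_freqs bookkeeping and both cpufreq_notify_transition() calls move into the core, which now resolves the table index and posts the PRE-/POSTCHANGE notifications around the driver callback. What remains for a driver is roughly this (continuing the hypothetical "foo" sketch; foo_write_pstate() stands in for the real register or MSR write):

	static int foo_write_pstate(unsigned int pstate)
	{
		/* platform-specific register/MSR programming would go here */
		return 0;
	}

	static int foo_target_index(struct cpufreq_policy *policy,
				    unsigned int index)
	{
		/*
		 * "index" has already been validated against the table
		 * registered via cpufreq_table_validate_and_show(), so just
		 * program the P-state recorded in driver_data.
		 */
		return foo_write_pstate(foo_freq_table[index].driver_data);
	}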
@@ -868,6 +838,8 @@ err_freqfree: kfree(data->freq_table); err_unreg: acpi_processor_unregister_performance(perf, cpu); +err_free_mask: + free_cpumask_var(data->freqdomain_cpus); err_free: kfree(data); per_cpu(acfreq_data, cpu) = NULL; @@ -882,10 +854,10 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) pr_debug("acpi_cpufreq_cpu_exit\n"); if (data) { - cpufreq_frequency_table_put_attr(policy->cpu); per_cpu(acfreq_data, policy->cpu) = NULL; acpi_processor_unregister_performance(data->acpi_data, policy->cpu); + free_cpumask_var(data->freqdomain_cpus); kfree(data->freq_table); kfree(data); } @@ -906,20 +878,21 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy) static struct freq_attr *acpi_cpufreq_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, + &freqdomain_cpus, NULL, /* this is a placeholder for cpb, do not remove */ NULL, }; static struct cpufreq_driver acpi_cpufreq_driver = { - .verify = acpi_cpufreq_verify, - .target = acpi_cpufreq_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = acpi_cpufreq_target, .bios_limit = acpi_processor_get_bios_limit, .init = acpi_cpufreq_cpu_init, .exit = acpi_cpufreq_cpu_exit, .resume = acpi_cpufreq_resume, .name = "acpi-cpufreq", - .owner = THIS_MODULE, .attr = acpi_cpufreq_attr, + .set_boost = _store_boost, }; static void __init acpi_cpufreq_boost_init(void) @@ -930,33 +903,23 @@ static void __init acpi_cpufreq_boost_init(void) if (!msrs) return; - boost_supported = true; - boost_enabled = boost_state(0); + acpi_cpufreq_driver.boost_supported = true; + acpi_cpufreq_driver.boost_enabled = boost_state(0); - get_online_cpus(); + cpu_notifier_register_begin(); /* Force all MSRs to the same value */ - boost_set_msrs(boost_enabled, cpu_online_mask); + boost_set_msrs(acpi_cpufreq_driver.boost_enabled, + cpu_online_mask); - register_cpu_notifier(&boost_nb); + __register_cpu_notifier(&boost_nb); - put_online_cpus(); - } else - global_boost.attr.mode = 0444; - - /* We create the boost file in any case, though for systems without - * hardware support it will be read-only and hardwired to return 0. 
- */ - if (sysfs_create_file(cpufreq_global_kobject, &(global_boost.attr))) - pr_warn(PFX "could not register global boost sysfs file\n"); - else - pr_debug("registered global boost sysfs file\n"); + cpu_notifier_register_done(); + } } -static void __exit acpi_cpufreq_boost_exit(void) +static void acpi_cpufreq_boost_exit(void) { - sysfs_remove_file(cpufreq_global_kobject, &(global_boost.attr)); - if (msrs) { unregister_cpu_notifier(&boost_nb); @@ -970,7 +933,11 @@ static int __init acpi_cpufreq_init(void) int ret; if (acpi_disabled) - return 0; + return -ENODEV; + + /* don't keep reloading if cpufreq_driver exists */ + if (cpufreq_get_current_driver()) + return -EEXIST; pr_debug("acpi_cpufreq_init\n"); @@ -998,13 +965,13 @@ static int __init acpi_cpufreq_init(void) *iter = &cpb; } #endif + acpi_cpufreq_boost_init(); ret = cpufreq_register_driver(&acpi_cpufreq_driver); - if (ret) + if (ret) { free_acpi_perf_data(); - else - acpi_cpufreq_boost_init(); - + acpi_cpufreq_boost_exit(); + } return ret; } @@ -1034,4 +1001,11 @@ static const struct x86_cpu_id acpi_cpufreq_ids[] = { }; MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids); +static const struct acpi_device_id processor_device_ids[] = { + {ACPI_PROCESSOR_OBJECT_HID, }, + {ACPI_PROCESSOR_DEVICE_HID, }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, processor_device_ids); + MODULE_ALIAS("acpi"); diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c index 5d7f53fcd6f..1f4d4e31505 100644 --- a/drivers/cpufreq/arm_big_little.c +++ b/drivers/cpufreq/arm_big_little.c @@ -24,112 +24,323 @@ #include <linux/cpufreq.h> #include <linux/cpumask.h> #include <linux/export.h> +#include <linux/mutex.h> #include <linux/of_platform.h> -#include <linux/opp.h> +#include <linux/pm_opp.h> #include <linux/slab.h> #include <linux/topology.h> #include <linux/types.h> +#include <asm/bL_switcher.h> #include "arm_big_little.h" /* Currently we support only two clusters */ +#define A15_CLUSTER 0 +#define A7_CLUSTER 1 #define MAX_CLUSTERS 2 +#ifdef CONFIG_BL_SWITCHER +static bool bL_switching_enabled; +#define is_bL_switching_enabled() bL_switching_enabled +#define set_switching_enabled(x) (bL_switching_enabled = (x)) +#else +#define is_bL_switching_enabled() false +#define set_switching_enabled(x) do { } while (0) +#endif + +#define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq) +#define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq) + static struct cpufreq_arm_bL_ops *arm_bL_ops; static struct clk *clk[MAX_CLUSTERS]; -static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS]; -static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)}; +static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1]; +static atomic_t cluster_usage[MAX_CLUSTERS + 1]; + +static unsigned int clk_big_min; /* (Big) clock frequencies */ +static unsigned int clk_little_max; /* Maximum clock frequency (Little) */ + +static DEFINE_PER_CPU(unsigned int, physical_cluster); +static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq); -static unsigned int bL_cpufreq_get(unsigned int cpu) +static struct mutex cluster_lock[MAX_CLUSTERS]; + +static inline int raw_cpu_to_cluster(int cpu) { - u32 cur_cluster = cpu_to_cluster(cpu); + return topology_physical_package_id(cpu); +} - return clk_get_rate(clk[cur_cluster]) / 1000; +static inline int cpu_to_cluster(int cpu) +{ + return is_bL_switching_enabled() ? 
+ MAX_CLUSTERS : raw_cpu_to_cluster(cpu); } -/* Validate policy frequency range */ -static int bL_cpufreq_verify_policy(struct cpufreq_policy *policy) +static unsigned int find_cluster_maxfreq(int cluster) { - u32 cur_cluster = cpu_to_cluster(policy->cpu); + int j; + u32 max_freq = 0, cpu_freq; - return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]); + for_each_online_cpu(j) { + cpu_freq = per_cpu(cpu_last_req_freq, j); + + if ((cluster == per_cpu(physical_cluster, j)) && + (max_freq < cpu_freq)) + max_freq = cpu_freq; + } + + pr_debug("%s: cluster: %d, max freq: %d\n", __func__, cluster, + max_freq); + + return max_freq; +} + +static unsigned int clk_get_cpu_rate(unsigned int cpu) +{ + u32 cur_cluster = per_cpu(physical_cluster, cpu); + u32 rate = clk_get_rate(clk[cur_cluster]) / 1000; + + /* For switcher we use virtual A7 clock rates */ + if (is_bL_switching_enabled()) + rate = VIRT_FREQ(cur_cluster, rate); + + pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu, + cur_cluster, rate); + + return rate; +} + +static unsigned int bL_cpufreq_get_rate(unsigned int cpu) +{ + if (is_bL_switching_enabled()) { + pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq, + cpu)); + + return per_cpu(cpu_last_req_freq, cpu); + } else { + return clk_get_cpu_rate(cpu); + } +} + +static unsigned int +bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate) +{ + u32 new_rate, prev_rate; + int ret; + bool bLs = is_bL_switching_enabled(); + + mutex_lock(&cluster_lock[new_cluster]); + + if (bLs) { + prev_rate = per_cpu(cpu_last_req_freq, cpu); + per_cpu(cpu_last_req_freq, cpu) = rate; + per_cpu(physical_cluster, cpu) = new_cluster; + + new_rate = find_cluster_maxfreq(new_cluster); + new_rate = ACTUAL_FREQ(new_cluster, new_rate); + } else { + new_rate = rate; + } + + pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d, freq: %d\n", + __func__, cpu, old_cluster, new_cluster, new_rate); + + ret = clk_set_rate(clk[new_cluster], new_rate * 1000); + if (WARN_ON(ret)) { + pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret, + new_cluster); + if (bLs) { + per_cpu(cpu_last_req_freq, cpu) = prev_rate; + per_cpu(physical_cluster, cpu) = old_cluster; + } + + mutex_unlock(&cluster_lock[new_cluster]); + + return ret; + } + + mutex_unlock(&cluster_lock[new_cluster]); + + /* Recalc freq for old cluster when switching clusters */ + if (old_cluster != new_cluster) { + pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d\n", + __func__, cpu, old_cluster, new_cluster); + + /* Switch cluster */ + bL_switch_request(cpu, new_cluster); + + mutex_lock(&cluster_lock[old_cluster]); + + /* Set freq of old cluster if there are cpus left on it */ + new_rate = find_cluster_maxfreq(old_cluster); + new_rate = ACTUAL_FREQ(old_cluster, new_rate); + + if (new_rate) { + pr_debug("%s: Updating rate of old cluster: %d, to freq: %d\n", + __func__, old_cluster, new_rate); + + if (clk_set_rate(clk[old_cluster], new_rate * 1000)) + pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n", + __func__, ret, old_cluster); + } + mutex_unlock(&cluster_lock[old_cluster]); + } + + return 0; } /* Set clock frequency */ static int bL_cpufreq_set_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) + unsigned int index) { - struct cpufreq_freqs freqs; - u32 cpu = policy->cpu, freq_tab_idx, cur_cluster; - int ret = 0; + u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster; + unsigned int freqs_new; + + cur_cluster = cpu_to_cluster(cpu); + new_cluster = 
actual_cluster = per_cpu(physical_cluster, cpu); + + freqs_new = freq_table[cur_cluster][index].frequency; + + if (is_bL_switching_enabled()) { + if ((actual_cluster == A15_CLUSTER) && + (freqs_new < clk_big_min)) { + new_cluster = A7_CLUSTER; + } else if ((actual_cluster == A7_CLUSTER) && + (freqs_new > clk_little_max)) { + new_cluster = A15_CLUSTER; + } + } - cur_cluster = cpu_to_cluster(policy->cpu); + return bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new); +} - freqs.old = bL_cpufreq_get(policy->cpu); +static inline u32 get_table_count(struct cpufreq_frequency_table *table) +{ + int count; - /* Determine valid target frequency using freq_table */ - cpufreq_frequency_table_target(policy, freq_table[cur_cluster], - target_freq, relation, &freq_tab_idx); - freqs.new = freq_table[cur_cluster][freq_tab_idx].frequency; + for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++) + ; - pr_debug("%s: cpu: %d, cluster: %d, oldfreq: %d, target freq: %d, new freq: %d\n", - __func__, cpu, cur_cluster, freqs.old, target_freq, - freqs.new); + return count; +} - if (freqs.old == freqs.new) - return 0; +/* get the minimum frequency in the cpufreq_frequency_table */ +static inline u32 get_table_min(struct cpufreq_frequency_table *table) +{ + struct cpufreq_frequency_table *pos; + uint32_t min_freq = ~0; + cpufreq_for_each_entry(pos, table) + if (pos->frequency < min_freq) + min_freq = pos->frequency; + return min_freq; +} - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); +/* get the maximum frequency in the cpufreq_frequency_table */ +static inline u32 get_table_max(struct cpufreq_frequency_table *table) +{ + struct cpufreq_frequency_table *pos; + uint32_t max_freq = 0; + cpufreq_for_each_entry(pos, table) + if (pos->frequency > max_freq) + max_freq = pos->frequency; + return max_freq; +} - ret = clk_set_rate(clk[cur_cluster], freqs.new * 1000); - if (ret) { - pr_err("clk_set_rate failed: %d\n", ret); - return ret; +static int merge_cluster_tables(void) +{ + int i, j, k = 0, count = 1; + struct cpufreq_frequency_table *table; + + for (i = 0; i < MAX_CLUSTERS; i++) + count += get_table_count(freq_table[i]); + + table = kzalloc(sizeof(*table) * count, GFP_KERNEL); + if (!table) + return -ENOMEM; + + freq_table[MAX_CLUSTERS] = table; + + /* Add in reverse order to get freqs in increasing order */ + for (i = MAX_CLUSTERS - 1; i >= 0; i--) { + for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END; + j++) { + table[k].frequency = VIRT_FREQ(i, + freq_table[i][j].frequency); + pr_debug("%s: index: %d, freq: %d\n", __func__, k, + table[k].frequency); + k++; + } } - policy->cur = freqs.new; + table[k].driver_data = k; + table[k].frequency = CPUFREQ_TABLE_END; - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + pr_debug("%s: End, table: %p, count: %d\n", __func__, table, k); - return ret; + return 0; +} + +static void _put_cluster_clk_and_freq_table(struct device *cpu_dev) +{ + u32 cluster = raw_cpu_to_cluster(cpu_dev->id); + + if (!freq_table[cluster]) + return; + + clk_put(clk[cluster]); + dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]); + dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster); } static void put_cluster_clk_and_freq_table(struct device *cpu_dev) { u32 cluster = cpu_to_cluster(cpu_dev->id); + int i; - if (!atomic_dec_return(&cluster_usage[cluster])) { - clk_put(clk[cluster]); - opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]); - dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster); + if 
(atomic_dec_return(&cluster_usage[cluster])) + return; + + if (cluster < MAX_CLUSTERS) + return _put_cluster_clk_and_freq_table(cpu_dev); + + for_each_present_cpu(i) { + struct device *cdev = get_cpu_device(i); + if (!cdev) { + pr_err("%s: failed to get cpu%d device\n", __func__, i); + return; + } + + _put_cluster_clk_and_freq_table(cdev); } + + /* free virtual table */ + kfree(freq_table[cluster]); } -static int get_cluster_clk_and_freq_table(struct device *cpu_dev) +static int _get_cluster_clk_and_freq_table(struct device *cpu_dev) { - u32 cluster = cpu_to_cluster(cpu_dev->id); + u32 cluster = raw_cpu_to_cluster(cpu_dev->id); char name[14] = "cpu-cluster."; int ret; - if (atomic_inc_return(&cluster_usage[cluster]) != 1) + if (freq_table[cluster]) return 0; ret = arm_bL_ops->init_opp_table(cpu_dev); if (ret) { dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n", __func__, cpu_dev->id, ret); - goto atomic_dec; + goto out; } - ret = opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]); + ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]); if (ret) { dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n", __func__, cpu_dev->id, ret); - goto atomic_dec; + goto out; } name[12] = cluster + '0'; - clk[cluster] = clk_get_sys(name, NULL); + clk[cluster] = clk_get(cpu_dev, name); if (!IS_ERR(clk[cluster])) { dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n", __func__, clk[cluster], freq_table[cluster], @@ -140,15 +351,74 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev) dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n", __func__, cpu_dev->id, cluster); ret = PTR_ERR(clk[cluster]); - opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]); + dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]); -atomic_dec: - atomic_dec(&cluster_usage[cluster]); +out: dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__, cluster); return ret; } +static int get_cluster_clk_and_freq_table(struct device *cpu_dev) +{ + u32 cluster = cpu_to_cluster(cpu_dev->id); + int i, ret; + + if (atomic_inc_return(&cluster_usage[cluster]) != 1) + return 0; + + if (cluster < MAX_CLUSTERS) { + ret = _get_cluster_clk_and_freq_table(cpu_dev); + if (ret) + atomic_dec(&cluster_usage[cluster]); + return ret; + } + + /* + * Get data for all clusters and fill virtual cluster with a merge of + * both + */ + for_each_present_cpu(i) { + struct device *cdev = get_cpu_device(i); + if (!cdev) { + pr_err("%s: failed to get cpu%d device\n", __func__, i); + return -ENODEV; + } + + ret = _get_cluster_clk_and_freq_table(cdev); + if (ret) + goto put_clusters; + } + + ret = merge_cluster_tables(); + if (ret) + goto put_clusters; + + /* Assuming 2 cluster, set clk_big_min and clk_little_max */ + clk_big_min = get_table_min(freq_table[0]); + clk_little_max = VIRT_FREQ(1, get_table_max(freq_table[1])); + + pr_debug("%s: cluster: %d, clk_big_min: %d, clk_little_max: %d\n", + __func__, cluster, clk_big_min, clk_little_max); + + return 0; + +put_clusters: + for_each_present_cpu(i) { + struct device *cdev = get_cpu_device(i); + if (!cdev) { + pr_err("%s: failed to get cpu%d device\n", __func__, i); + return -ENODEV; + } + + _put_cluster_clk_and_freq_table(cdev); + } + + atomic_dec(&cluster_usage[cluster]); + + return ret; +} + /* Per-CPU initialization */ static int bL_cpufreq_init(struct cpufreq_policy *policy) { @@ -167,7 +437,7 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy) if (ret) return ret; - ret = 
cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]); + ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]); if (ret) { dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n", policy->cpu, cur_cluster); @@ -175,7 +445,17 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy) return ret; } - cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu); + if (cur_cluster < MAX_CLUSTERS) { + int cpu; + + cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); + + for_each_cpu(cpu, policy->cpus) + per_cpu(physical_cluster, cpu) = cur_cluster; + } else { + /* Assumption: during init, we are always running on A15 */ + per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER; + } if (arm_bL_ops->get_transition_latency) policy->cpuinfo.transition_latency = @@ -183,9 +463,8 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy) else policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; - policy->cur = bL_cpufreq_get(policy->cpu); - - cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); + if (is_bL_switching_enabled()) + per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu); dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu); return 0; @@ -208,27 +487,54 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy) return 0; } -/* Export freq_table to sysfs */ -static struct freq_attr *bL_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver bL_cpufreq_driver = { .name = "arm-big-little", - .flags = CPUFREQ_STICKY, - .verify = bL_cpufreq_verify_policy, - .target = bL_cpufreq_set_target, - .get = bL_cpufreq_get, + .flags = CPUFREQ_STICKY | + CPUFREQ_HAVE_GOVERNOR_PER_POLICY | + CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = bL_cpufreq_set_target, + .get = bL_cpufreq_get_rate, .init = bL_cpufreq_init, .exit = bL_cpufreq_exit, - .have_governor_per_policy = true, - .attr = bL_cpufreq_attr, + .attr = cpufreq_generic_attr, +}; + +static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb, + unsigned long action, void *_arg) +{ + pr_debug("%s: action: %ld\n", __func__, action); + + switch (action) { + case BL_NOTIFY_PRE_ENABLE: + case BL_NOTIFY_PRE_DISABLE: + cpufreq_unregister_driver(&bL_cpufreq_driver); + break; + + case BL_NOTIFY_POST_ENABLE: + set_switching_enabled(true); + cpufreq_register_driver(&bL_cpufreq_driver); + break; + + case BL_NOTIFY_POST_DISABLE: + set_switching_enabled(false); + cpufreq_register_driver(&bL_cpufreq_driver); + break; + + default: + return NOTIFY_DONE; + } + + return NOTIFY_OK; +} + +static struct notifier_block bL_switcher_notifier = { + .notifier_call = bL_cpufreq_switcher_notifier, }; int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops) { - int ret; + int ret, i; if (arm_bL_ops) { pr_debug("%s: Already registered: %s, exiting\n", __func__, @@ -243,16 +549,29 @@ int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops) arm_bL_ops = ops; + ret = bL_switcher_get_enabled(); + set_switching_enabled(ret); + + for (i = 0; i < MAX_CLUSTERS; i++) + mutex_init(&cluster_lock[i]); + ret = cpufreq_register_driver(&bL_cpufreq_driver); if (ret) { pr_info("%s: Failed registering platform driver: %s, err: %d\n", __func__, ops->name, ret); arm_bL_ops = NULL; } else { - pr_info("%s: Registered platform driver: %s\n", __func__, - ops->name); + ret = bL_switcher_register_notifier(&bL_switcher_notifier); + if (ret) { + cpufreq_unregister_driver(&bL_cpufreq_driver); + 
arm_bL_ops = NULL; + } else { + pr_info("%s: Registered platform driver: %s\n", + __func__, ops->name); + } } + bL_switcher_put_enabled(); return ret; } EXPORT_SYMBOL_GPL(bL_cpufreq_register); @@ -265,7 +584,10 @@ void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops) return; } + bL_switcher_get_enabled(); + bL_switcher_unregister_notifier(&bL_switcher_notifier); cpufreq_unregister_driver(&bL_cpufreq_driver); + bL_switcher_put_enabled(); pr_info("%s: Un-registered platform driver: %s\n", __func__, arm_bL_ops->name); arm_bL_ops = NULL; diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h index 79b2ce17884..70f18fc12d4 100644 --- a/drivers/cpufreq/arm_big_little.h +++ b/drivers/cpufreq/arm_big_little.h @@ -34,11 +34,6 @@ struct cpufreq_arm_bL_ops { int (*init_opp_table)(struct device *cpu_dev); }; -static inline int cpu_to_cluster(int cpu) -{ - return topology_physical_package_id(cpu); -} - int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops); void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops); diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c index fd9e3ea6a48..8d9d5910890 100644 --- a/drivers/cpufreq/arm_big_little_dt.c +++ b/drivers/cpufreq/arm_big_little_dt.c @@ -19,13 +19,12 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/cpu.h> #include <linux/cpufreq.h> #include <linux/device.h> #include <linux/export.h> #include <linux/module.h> -#include <linux/of.h> -#include <linux/opp.h> +#include <linux/of_device.h> +#include <linux/pm_opp.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/types.h> @@ -34,27 +33,13 @@ /* get cpu node with valid operating-points */ static struct device_node *get_cpu_node_with_valid_op(int cpu) { - struct device_node *np = NULL, *parent; - int count = 0; + struct device_node *np = of_cpu_device_node_get(cpu); - parent = of_find_node_by_path("/cpus"); - if (!parent) { - pr_err("failed to find OF /cpus\n"); - return NULL; + if (!of_get_property(np, "operating-points", NULL)) { + of_node_put(np); + np = NULL; } - for_each_child_of_node(parent, np) { - if (count++ != cpu) - continue; - if (!of_get_property(np, "operating-points", NULL)) { - of_node_put(np); - np = NULL; - } - - break; - } - - of_node_put(parent); return np; } @@ -63,11 +48,12 @@ static int dt_init_opp_table(struct device *cpu_dev) struct device_node *np; int ret; - np = get_cpu_node_with_valid_op(cpu_dev->id); - if (!np) - return -ENODATA; + np = of_node_get(cpu_dev->of_node); + if (!np) { + pr_err("failed to find cpu%d node\n", cpu_dev->id); + return -ENOENT; + } - cpu_dev->of_node = np; ret = of_init_opp_table(cpu_dev); of_node_put(np); @@ -79,9 +65,11 @@ static int dt_get_transition_latency(struct device *cpu_dev) struct device_node *np; u32 transition_latency = CPUFREQ_ETERNAL; - np = get_cpu_node_with_valid_op(cpu_dev->id); - if (!np) + np = of_node_get(cpu_dev->of_node); + if (!np) { + pr_info("Failed to find cpu node. 
Use CPUFREQ_ETERNAL transition latency\n"); return CPUFREQ_ETERNAL; + } of_property_read_u32(np, "clock-latency", &transition_latency); of_node_put(np); diff --git a/drivers/cpufreq/at32ap-cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c index 654488723cb..7b612c8bb09 100644 --- a/drivers/cpufreq/at32ap-cpufreq.c +++ b/drivers/cpufreq/at32ap-cpufreq.c @@ -19,100 +19,104 @@ #include <linux/clk.h> #include <linux/err.h> #include <linux/export.h> +#include <linux/slab.h> -static struct clk *cpuclk; - -static int at32_verify_speed(struct cpufreq_policy *policy) -{ - if (policy->cpu != 0) - return -EINVAL; - - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); - return 0; -} - -static unsigned int at32_get_speed(unsigned int cpu) -{ - /* No SMP support */ - if (cpu) - return 0; - return (unsigned int)((clk_get_rate(cpuclk) + 500) / 1000); -} +static struct cpufreq_frequency_table *freq_table; static unsigned int ref_freq; static unsigned long loops_per_jiffy_ref; -static int at32_set_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) +static int at32_set_target(struct cpufreq_policy *policy, unsigned int index) { - struct cpufreq_freqs freqs; - long freq; - - /* Convert target_freq from kHz to Hz */ - freq = clk_round_rate(cpuclk, target_freq * 1000); - - /* Check if policy->min <= new_freq <= policy->max */ - if(freq < (policy->min * 1000) || freq > (policy->max * 1000)) - return -EINVAL; + unsigned int old_freq, new_freq; - pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000); - - freqs.old = at32_get_speed(0); - freqs.new = (freq + 500) / 1000; - freqs.flags = 0; + old_freq = policy->cur; + new_freq = freq_table[index].frequency; if (!ref_freq) { - ref_freq = freqs.old; + ref_freq = old_freq; loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy; } - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - if (freqs.old < freqs.new) + if (old_freq < new_freq) boot_cpu_data.loops_per_jiffy = cpufreq_scale( - loops_per_jiffy_ref, ref_freq, freqs.new); - clk_set_rate(cpuclk, freq); - if (freqs.new < freqs.old) + loops_per_jiffy_ref, ref_freq, new_freq); + clk_set_rate(policy->clk, new_freq * 1000); + if (new_freq < old_freq) boot_cpu_data.loops_per_jiffy = cpufreq_scale( - loops_per_jiffy_ref, ref_freq, freqs.new); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - pr_debug("cpufreq: set frequency %lu Hz\n", freq); + loops_per_jiffy_ref, ref_freq, new_freq); return 0; } -static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy) +static int at32_cpufreq_driver_init(struct cpufreq_policy *policy) { + unsigned int frequency, rate, min_freq; + struct clk *cpuclk; + int retval, steps, i; + if (policy->cpu != 0) return -EINVAL; cpuclk = clk_get(NULL, "cpu"); if (IS_ERR(cpuclk)) { pr_debug("cpufreq: could not get CPU clk\n"); - return PTR_ERR(cpuclk); + retval = PTR_ERR(cpuclk); + goto out_err; } - policy->cpuinfo.min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000; - policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; + min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000; + frequency = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; policy->cpuinfo.transition_latency = 0; - policy->cur = at32_get_speed(0); - policy->min = policy->cpuinfo.min_freq; - policy->max = policy->cpuinfo.max_freq; - printk("cpufreq: AT32AP CPU frequency driver\n"); + /* + * AVR32 CPU frequency rate scales in power of two between maximum and + * minimum, also add space for the 
table end marker. + * + * Further validate that the frequency is usable, and append it to the + * frequency table. + */ + steps = fls(frequency / min_freq) + 1; + freq_table = kzalloc(steps * sizeof(struct cpufreq_frequency_table), + GFP_KERNEL); + if (!freq_table) { + retval = -ENOMEM; + goto out_err_put_clk; + } - return 0; + for (i = 0; i < (steps - 1); i++) { + rate = clk_round_rate(cpuclk, frequency * 1000) / 1000; + + if (rate != frequency) + freq_table[i].frequency = CPUFREQ_ENTRY_INVALID; + else + freq_table[i].frequency = frequency; + + frequency /= 2; + } + + policy->clk = cpuclk; + freq_table[steps - 1].frequency = CPUFREQ_TABLE_END; + + retval = cpufreq_table_validate_and_show(policy, freq_table); + if (!retval) { + printk("cpufreq: AT32AP CPU frequency driver\n"); + return 0; + } + + kfree(freq_table); +out_err_put_clk: + clk_put(cpuclk); +out_err: + return retval; } static struct cpufreq_driver at32_driver = { .name = "at32ap", - .owner = THIS_MODULE, .init = at32_cpufreq_driver_init, - .verify = at32_verify_speed, - .target = at32_set_target, - .get = at32_get_speed, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = at32_set_target, + .get = cpufreq_generic_get, .flags = CPUFREQ_STICKY, }; diff --git a/drivers/cpufreq/blackfin-cpufreq.c b/drivers/cpufreq/blackfin-cpufreq.c index 995511e80be..a9f8e5bd071 100644 --- a/drivers/cpufreq/blackfin-cpufreq.c +++ b/drivers/cpufreq/blackfin-cpufreq.c @@ -20,23 +20,23 @@ /* this is the table of CCLK frequencies, in Hz */ -/* .index is the entry in the auxiliary dpm_state_table[] */ +/* .driver_data is the entry in the auxiliary dpm_state_table[] */ static struct cpufreq_frequency_table bfin_freq_table[] = { { .frequency = CPUFREQ_TABLE_END, - .index = 0, + .driver_data = 0, }, { .frequency = CPUFREQ_TABLE_END, - .index = 1, + .driver_data = 1, }, { .frequency = CPUFREQ_TABLE_END, - .index = 2, + .driver_data = 2, }, { .frequency = CPUFREQ_TABLE_END, - .index = 0, + .driver_data = 0, }, }; @@ -127,41 +127,28 @@ unsigned long cpu_set_cclk(int cpu, unsigned long new) } #endif -static int bfin_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) +static int bfin_target(struct cpufreq_policy *policy, unsigned int index) { #ifndef CONFIG_BF60x unsigned int plldiv; #endif - unsigned int index; - unsigned long cclk_hz; - struct cpufreq_freqs freqs; static unsigned long lpj_ref; static unsigned int lpj_ref_freq; + unsigned int old_freq, new_freq; int ret = 0; #if defined(CONFIG_CYCLES_CLOCKSOURCE) cycles_t cycles; #endif - if (cpufreq_frequency_table_target(policy, bfin_freq_table, target_freq, - relation, &index)) - return -EINVAL; + old_freq = bfin_getfreq_khz(0); + new_freq = bfin_freq_table[index].frequency; - cclk_hz = bfin_freq_table[index].frequency; - - freqs.old = bfin_getfreq_khz(0); - freqs.new = cclk_hz; - - pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n", - cclk_hz, target_freq, freqs.old); - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); #ifndef CONFIG_BF60x plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel; bfin_write_PLL_DIV(plldiv); #else - ret = cpu_set_cclk(policy->cpu, freqs.new * 1000); + ret = cpu_set_cclk(policy->cpu, new_freq * 1000); if (ret != 0) { WARN_ONCE(ret, "cpufreq set freq failed %d\n", ret); return ret; @@ -177,25 +164,16 @@ static int bfin_target(struct cpufreq_policy *policy, #endif if (!lpj_ref_freq) { lpj_ref = loops_per_jiffy; - lpj_ref_freq = freqs.old; + lpj_ref_freq = old_freq; } - if 
(freqs.new != freqs.old) { + if (new_freq != old_freq) { loops_per_jiffy = cpufreq_scale(lpj_ref, - lpj_ref_freq, freqs.new); + lpj_ref_freq, new_freq); } - /* TODO: just test case for cycles clock source, remove later */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - pr_debug("cpufreq: done\n"); return ret; } -static int bfin_verify_speed(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, bfin_freq_table); -} - static int __bfin_cpu_init(struct cpufreq_policy *policy) { @@ -209,24 +187,16 @@ static int __bfin_cpu_init(struct cpufreq_policy *policy) policy->cpuinfo.transition_latency = 50000; /* 50us assumed */ - policy->cur = cclk; - cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu); - return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table); + return cpufreq_table_validate_and_show(policy, bfin_freq_table); } -static struct freq_attr *bfin_freq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver bfin_driver = { - .verify = bfin_verify_speed, - .target = bfin_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = bfin_target, .get = bfin_getfreq_khz, .init = __bfin_cpu_init, .name = "bfin cpufreq", - .owner = THIS_MODULE, - .attr = bfin_freq_attr, + .attr = cpufreq_generic_attr, }; static int __init bfin_cpu_init(void) diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index ad1fde27766..86beda9f950 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c @@ -12,14 +12,18 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/clk.h> +#include <linux/cpu.h> +#include <linux/cpu_cooling.h> #include <linux/cpufreq.h> +#include <linux/cpumask.h> #include <linux/err.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/opp.h> +#include <linux/pm_opp.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> +#include <linux/thermal.h> static unsigned int transition_latency; static unsigned int voltage_tolerance; /* in percentage */ @@ -28,176 +32,105 @@ static struct device *cpu_dev; static struct clk *cpu_clk; static struct regulator *cpu_reg; static struct cpufreq_frequency_table *freq_table; +static struct thermal_cooling_device *cdev; -static int cpu0_verify_speed(struct cpufreq_policy *policy) +static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index) { - return cpufreq_frequency_table_verify(policy, freq_table); -} - -static unsigned int cpu0_get_speed(unsigned int cpu) -{ - return clk_get_rate(cpu_clk) / 1000; -} - -static int cpu0_set_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) -{ - struct cpufreq_freqs freqs; - struct opp *opp; + struct dev_pm_opp *opp; unsigned long volt = 0, volt_old = 0, tol = 0; + unsigned int old_freq, new_freq; long freq_Hz, freq_exact; - unsigned int index; int ret; - ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, - relation, &index); - if (ret) { - pr_err("failed to match target freqency %d: %d\n", - target_freq, ret); - return ret; - } - freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000); - if (freq_Hz < 0) + if (freq_Hz <= 0) freq_Hz = freq_table[index].frequency * 1000; - freq_exact = freq_Hz; - freqs.new = freq_Hz / 1000; - freqs.old = clk_get_rate(cpu_clk) / 1000; - - if (freqs.old == freqs.new) - return 0; - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + freq_exact = freq_Hz; + 
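
Both drivers above now hand their tables to cpufreq_table_validate_and_show() instead of pairing cpufreq_frequency_table_cpuinfo() with cpufreq_frequency_table_get_attr(). A sketch of what that helper amounts to, assuming the freq_table.c implementation that accompanies this series:

	int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
					    struct cpufreq_frequency_table *table)
	{
		int ret = cpufreq_frequency_table_cpuinfo(policy, table);

		/*
		 * On success, publish the table through the policy so the
		 * generic verify()/get() helpers and the generic sysfs
		 * attributes can find it without per-driver bookkeeping.
		 */
		if (!ret)
			policy->freq_table = table;

		return ret;
	}
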
new_freq = freq_Hz / 1000; + old_freq = clk_get_rate(cpu_clk) / 1000; - if (cpu_reg) { + if (!IS_ERR(cpu_reg)) { rcu_read_lock(); - opp = opp_find_freq_ceil(cpu_dev, &freq_Hz); + opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz); if (IS_ERR(opp)) { rcu_read_unlock(); pr_err("failed to find OPP for %ld\n", freq_Hz); - freqs.new = freqs.old; - ret = PTR_ERR(opp); - goto post_notify; + return PTR_ERR(opp); } - volt = opp_get_voltage(opp); + volt = dev_pm_opp_get_voltage(opp); rcu_read_unlock(); tol = volt * voltage_tolerance / 100; volt_old = regulator_get_voltage(cpu_reg); } pr_debug("%u MHz, %ld mV --> %u MHz, %ld mV\n", - freqs.old / 1000, volt_old ? volt_old / 1000 : -1, - freqs.new / 1000, volt ? volt / 1000 : -1); + old_freq / 1000, volt_old ? volt_old / 1000 : -1, + new_freq / 1000, volt ? volt / 1000 : -1); /* scaling up? scale voltage before frequency */ - if (cpu_reg && freqs.new > freqs.old) { + if (!IS_ERR(cpu_reg) && new_freq > old_freq) { ret = regulator_set_voltage_tol(cpu_reg, volt, tol); if (ret) { pr_err("failed to scale voltage up: %d\n", ret); - freqs.new = freqs.old; - goto post_notify; + return ret; } } ret = clk_set_rate(cpu_clk, freq_exact); if (ret) { pr_err("failed to set clock rate: %d\n", ret); - if (cpu_reg) + if (!IS_ERR(cpu_reg)) regulator_set_voltage_tol(cpu_reg, volt_old, tol); - freqs.new = freqs.old; - goto post_notify; + return ret; } /* scaling down? scale voltage after frequency */ - if (cpu_reg && freqs.new < freqs.old) { + if (!IS_ERR(cpu_reg) && new_freq < old_freq) { ret = regulator_set_voltage_tol(cpu_reg, volt, tol); if (ret) { pr_err("failed to scale voltage down: %d\n", ret); - clk_set_rate(cpu_clk, freqs.old * 1000); - freqs.new = freqs.old; + clk_set_rate(cpu_clk, old_freq * 1000); } } -post_notify: - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - return ret; } static int cpu0_cpufreq_init(struct cpufreq_policy *policy) { - int ret; - - ret = cpufreq_frequency_table_cpuinfo(policy, freq_table); - if (ret) { - pr_err("invalid frequency table: %d\n", ret); - return ret; - } - - policy->cpuinfo.transition_latency = transition_latency; - policy->cur = clk_get_rate(cpu_clk) / 1000; - - /* - * The driver only supports the SMP configuartion where all processors - * share the clock and voltage and clock. Use cpufreq affected_cpus - * interface to have all CPUs scaled together. 
- */ - cpumask_setall(policy->cpus); - - cpufreq_frequency_table_get_attr(freq_table, policy->cpu); - - return 0; + policy->clk = cpu_clk; + return cpufreq_generic_init(policy, freq_table, transition_latency); } -static int cpu0_cpufreq_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - - return 0; -} - -static struct freq_attr *cpu0_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver cpu0_cpufreq_driver = { - .flags = CPUFREQ_STICKY, - .verify = cpu0_verify_speed, - .target = cpu0_set_target, - .get = cpu0_get_speed, + .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = cpu0_set_target, + .get = cpufreq_generic_get, .init = cpu0_cpufreq_init, - .exit = cpu0_cpufreq_exit, .name = "generic_cpu0", - .attr = cpu0_cpufreq_attr, + .attr = cpufreq_generic_attr, }; static int cpu0_cpufreq_probe(struct platform_device *pdev) { - struct device_node *np, *parent; + struct device_node *np; int ret; - parent = of_find_node_by_path("/cpus"); - if (!parent) { - pr_err("failed to find OF /cpus\n"); - return -ENOENT; - } - - for_each_child_of_node(parent, np) { - if (of_get_property(np, "operating-points", NULL)) - break; + cpu_dev = get_cpu_device(0); + if (!cpu_dev) { + pr_err("failed to get cpu0 device\n"); + return -ENODEV; } + np = of_node_get(cpu_dev->of_node); if (!np) { pr_err("failed to find cpu0 node\n"); - ret = -ENOENT; - goto out_put_parent; + return -ENOENT; } - cpu_dev = &pdev->dev; - cpu_dev->of_node = np; - - cpu_reg = devm_regulator_get(cpu_dev, "cpu0"); + cpu_reg = regulator_get_optional(cpu_dev, "cpu0"); if (IS_ERR(cpu_reg)) { /* * If cpu0 regulator supply node is present, but regulator is @@ -210,26 +143,22 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) } pr_warn("failed to get cpu0 regulator: %ld\n", PTR_ERR(cpu_reg)); - cpu_reg = NULL; } - cpu_clk = devm_clk_get(cpu_dev, NULL); + cpu_clk = clk_get(cpu_dev, NULL); if (IS_ERR(cpu_clk)) { ret = PTR_ERR(cpu_clk); pr_err("failed to get cpu0 clock: %d\n", ret); - goto out_put_node; + goto out_put_reg; } - ret = of_init_opp_table(cpu_dev); - if (ret) { - pr_err("failed to init OPP table: %d\n", ret); - goto out_put_node; - } + /* OPPs might be populated at runtime, don't check for error here */ + of_init_opp_table(cpu_dev); - ret = opp_init_cpufreq_table(cpu_dev, &freq_table); + ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); if (ret) { pr_err("failed to init cpufreq table: %d\n", ret); - goto out_put_node; + goto out_put_clk; } of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance); @@ -237,8 +166,8 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) if (of_property_read_u32(np, "clock-latency", &transition_latency)) transition_latency = CPUFREQ_ETERNAL; - if (cpu_reg) { - struct opp *opp; + if (!IS_ERR(cpu_reg)) { + struct dev_pm_opp *opp; unsigned long min_uV, max_uV; int i; @@ -250,12 +179,12 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) ; rcu_read_lock(); - opp = opp_find_freq_exact(cpu_dev, + opp = dev_pm_opp_find_freq_exact(cpu_dev, freq_table[0].frequency * 1000, true); - min_uV = opp_get_voltage(opp); - opp = opp_find_freq_exact(cpu_dev, + min_uV = dev_pm_opp_get_voltage(opp); + opp = dev_pm_opp_find_freq_exact(cpu_dev, freq_table[i-1].frequency * 1000, true); - max_uV = opp_get_voltage(opp); + max_uV = dev_pm_opp_get_voltage(opp); 
rcu_read_unlock(); ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV); if (ret > 0) @@ -268,23 +197,38 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) goto out_free_table; } + /* + * For now, just loading the cooling device; + * thermal DT code takes care of matching them. + */ + if (of_find_property(np, "#cooling-cells", NULL)) { + cdev = of_cpufreq_cooling_register(np, cpu_present_mask); + if (IS_ERR(cdev)) + pr_err("running cpufreq without cooling device: %ld\n", + PTR_ERR(cdev)); + } + of_node_put(np); - of_node_put(parent); return 0; out_free_table: - opp_free_cpufreq_table(cpu_dev, &freq_table); + dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); +out_put_clk: + if (!IS_ERR(cpu_clk)) + clk_put(cpu_clk); +out_put_reg: + if (!IS_ERR(cpu_reg)) + regulator_put(cpu_reg); out_put_node: of_node_put(np); -out_put_parent: - of_node_put(parent); return ret; } static int cpu0_cpufreq_remove(struct platform_device *pdev) { + cpufreq_cooling_unregister(cdev); cpufreq_unregister_driver(&cpu0_cpufreq_driver); - opp_free_cpufreq_table(cpu_dev, &freq_table); + dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); return 0; } diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c index af1542d4144..a2258090b58 100644 --- a/drivers/cpufreq/cpufreq-nforce2.c +++ b/drivers/cpufreq/cpufreq-nforce2.c @@ -270,7 +270,7 @@ static int nforce2_target(struct cpufreq_policy *policy, pr_debug("Old CPU frequency %d kHz, new %d kHz\n", freqs.old, freqs.new); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + cpufreq_freq_transition_begin(policy, &freqs); /* Disable IRQs */ /* local_irq_save(flags); */ @@ -285,7 +285,7 @@ static int nforce2_target(struct cpufreq_policy *policy, /* Enable IRQs */ /* local_irq_restore(flags); */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + cpufreq_freq_transition_end(policy, &freqs, 0); return 0; } @@ -303,9 +303,7 @@ static int nforce2_verify(struct cpufreq_policy *policy) if (policy->min < (fsb_pol_max * fid * 100)) policy->max = (fsb_pol_max + 1) * fid * 100; - cpufreq_verify_within_limits(policy, - policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); + cpufreq_verify_within_cpu_limits(policy); return 0; } @@ -362,7 +360,6 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy) policy->min = policy->cpuinfo.min_freq = min_fsb * fid * 100; policy->max = policy->cpuinfo.max_freq = max_fsb * fid * 100; policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; - policy->cur = nforce2_get(policy->cpu); return 0; } @@ -379,11 +376,10 @@ static struct cpufreq_driver nforce2_driver = { .get = nforce2_get, .init = nforce2_cpu_init, .exit = nforce2_cpu_exit, - .owner = THIS_MODULE, }; #ifdef MODULE -static DEFINE_PCI_DEVICE_TABLE(nforce2_ids) = { +static const struct pci_device_id nforce2_ids[] = { { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2 }, {} }; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 2d53f47d174..6f024852c6f 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -3,6 +3,7 @@ * * Copyright (C) 2001 Russell King * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> + * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org> * * Oct 2005 - Ashok Raj <ashok.raj@intel.com> * Added handling for CPU hotplug @@ -12,26 +13,21 @@ * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
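
nforce2_verify() above now ends with cpufreq_verify_within_cpu_limits(policy). That wrapper is not shown in this hunk; presumably it is the include/linux/cpufreq.h helper that folds the cpuinfo limits in, along the lines of:

	static inline void cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
	{
		/* Clamp policy->min/max to the hardware limits in policy->cpuinfo. */
		cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
					     policy->cpuinfo.max_freq);
	}
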
- * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/notifier.h> +#include <linux/cpu.h> #include <linux/cpufreq.h> #include <linux/delay.h> -#include <linux/interrupt.h> -#include <linux/spinlock.h> #include <linux/device.h> -#include <linux/slab.h> -#include <linux/cpu.h> -#include <linux/completion.h> +#include <linux/init.h> +#include <linux/kernel_stat.h> +#include <linux/module.h> #include <linux/mutex.h> -#include <linux/syscore_ops.h> - +#include <linux/slab.h> +#include <linux/suspend.h> +#include <linux/tick.h> #include <trace/events/power.h> /** @@ -41,55 +37,27 @@ */ static struct cpufreq_driver *cpufreq_driver; static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data); -#ifdef CONFIG_HOTPLUG_CPU -/* This one keeps track of the previously set governor of a removed CPU */ -static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor); -#endif +static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback); static DEFINE_RWLOCK(cpufreq_driver_lock); +DEFINE_MUTEX(cpufreq_governor_lock); +static LIST_HEAD(cpufreq_policy_list); -/* - * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure - * all cpufreq/hotplug/workqueue/etc related lock issues. - * - * The rules for this semaphore: - * - Any routine that wants to read from the policy structure will - * do a down_read on this semaphore. - * - Any routine that will write to the policy structure and/or may take away - * the policy altogether (eg. CPU hotplug), will hold this lock in write - * mode before doing so. - * - * Additional rules: - * - Governor routines that can be called in cpufreq hotplug path should not - * take this sem as top level hotplug notifier handler takes this. 
- * - Lock should not be held across - * __cpufreq_governor(data, CPUFREQ_GOV_STOP); - */ -static DEFINE_PER_CPU(int, cpufreq_policy_cpu); -static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem); - -#define lock_policy_rwsem(mode, cpu) \ -static int lock_policy_rwsem_##mode(int cpu) \ -{ \ - int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \ - BUG_ON(policy_cpu == -1); \ - down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \ - \ - return 0; \ -} +/* This one keeps track of the previously set governor of a removed CPU */ +static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor); -lock_policy_rwsem(read, cpu); -lock_policy_rwsem(write, cpu); +/* Flag to suspend/resume CPUFreq governors */ +static bool cpufreq_suspended; -#define unlock_policy_rwsem(mode, cpu) \ -static void unlock_policy_rwsem_##mode(int cpu) \ -{ \ - int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \ - BUG_ON(policy_cpu == -1); \ - up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \ +static inline bool has_target(void) +{ + return cpufreq_driver->target_index || cpufreq_driver->target; } -unlock_policy_rwsem(read, cpu); -unlock_policy_rwsem(write, cpu); +/* + * rwsem to guarantee that cpufreq driver module doesn't unload during critical + * sections + */ +static DECLARE_RWSEM(cpufreq_rwsem); /* internal prototypes */ static int __cpufreq_governor(struct cpufreq_policy *policy, @@ -130,82 +98,145 @@ static DEFINE_MUTEX(cpufreq_governor_mutex); bool have_governor_per_policy(void) { - return cpufreq_driver->have_governor_per_policy; + return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY); } +EXPORT_SYMBOL_GPL(have_governor_per_policy); -static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs) +struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy) { - struct cpufreq_policy *data; - unsigned long flags; + if (have_governor_per_policy()) + return &policy->kobj; + else + return cpufreq_global_kobject; +} +EXPORT_SYMBOL_GPL(get_governor_parent_kobj); - if (cpu >= nr_cpu_ids) - goto err_out; +static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) +{ + u64 idle_time; + u64 cur_wall_time; + u64 busy_time; - /* get the cpufreq driver */ - read_lock_irqsave(&cpufreq_driver_lock, flags); + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); - if (!cpufreq_driver) - goto err_out_unlock; + busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; - if (!try_module_get(cpufreq_driver->owner)) - goto err_out_unlock; + idle_time = cur_wall_time - busy_time; + if (wall) + *wall = cputime_to_usecs(cur_wall_time); + return cputime_to_usecs(idle_time); +} - /* get the CPU */ - data = per_cpu(cpufreq_cpu_data, cpu); +u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy) +{ + u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? 
wall : NULL); - if (!data) - goto err_out_put_module; + if (idle_time == -1ULL) + return get_cpu_idle_time_jiffy(cpu, wall); + else if (!io_busy) + idle_time += get_cpu_iowait_time_us(cpu, wall); - if (!sysfs && !kobject_get(&data->kobj)) - goto err_out_put_module; + return idle_time; +} +EXPORT_SYMBOL_GPL(get_cpu_idle_time); - read_unlock_irqrestore(&cpufreq_driver_lock, flags); - return data; +/* + * This is a generic cpufreq init() routine which can be used by cpufreq + * drivers of SMP systems. It will do the following: + * - validate & show the freq table passed + * - set the policy's transition latency + * - fill policy->cpus with all possible CPUs + */ +int cpufreq_generic_init(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table, + unsigned int transition_latency) +{ + int ret; -err_out_put_module: - module_put(cpufreq_driver->owner); -err_out_unlock: - read_unlock_irqrestore(&cpufreq_driver_lock, flags); -err_out: - return NULL; + ret = cpufreq_table_validate_and_show(policy, table); + if (ret) { + pr_err("%s: invalid frequency table: %d\n", __func__, ret); + return ret; + } + + policy->cpuinfo.transition_latency = transition_latency; + + /* + * The driver only supports the SMP configuration where all processors + * share the clock and voltage. + */ + cpumask_setall(policy->cpus); + + return 0; } +EXPORT_SYMBOL_GPL(cpufreq_generic_init); -struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) +unsigned int cpufreq_generic_get(unsigned int cpu) { - if (cpufreq_disabled()) - return NULL; + struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); + + if (!policy || IS_ERR(policy->clk)) { + pr_err("%s: No %s associated with cpu: %d\n", + __func__, policy ? "clk" : "policy", cpu); + return 0; + } - return __cpufreq_cpu_get(cpu, false); + return clk_get_rate(policy->clk) / 1000; } -EXPORT_SYMBOL_GPL(cpufreq_cpu_get); +EXPORT_SYMBOL_GPL(cpufreq_generic_get); -static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu) +/* Only for cpufreq core internal use */ +struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu) { - return __cpufreq_cpu_get(cpu, true); + return per_cpu(cpufreq_cpu_data, cpu); } -static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs) +struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) { - if (!sysfs) - kobject_put(&data->kobj); - module_put(cpufreq_driver->owner); + struct cpufreq_policy *policy = NULL; + unsigned long flags; + + if (cpufreq_disabled() || (cpu >= nr_cpu_ids)) + return NULL; + + if (!down_read_trylock(&cpufreq_rwsem)) + return NULL; + + /* get the cpufreq driver */ + read_lock_irqsave(&cpufreq_driver_lock, flags); + + if (cpufreq_driver) { + /* get the CPU */ + policy = per_cpu(cpufreq_cpu_data, cpu); + if (policy) + kobject_get(&policy->kobj); + } + + read_unlock_irqrestore(&cpufreq_driver_lock, flags); + + if (!policy) + up_read(&cpufreq_rwsem); + + return policy; } +EXPORT_SYMBOL_GPL(cpufreq_cpu_get); -void cpufreq_cpu_put(struct cpufreq_policy *data) +void cpufreq_cpu_put(struct cpufreq_policy *policy) { if (cpufreq_disabled()) return; - __cpufreq_cpu_put(data, false); + kobject_put(&policy->kobj); + up_read(&cpufreq_rwsem); } EXPORT_SYMBOL_GPL(cpufreq_cpu_put); -static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data) -{ - __cpufreq_cpu_put(data, true); -} - /********************************************************************* * EXTERNALLY AFFECTING FREQUENCY CHANGES * *********************************************************************/ @@ -220,7 +251,7 @@ static void 
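
get_cpu_idle_time() above is exported for the governors; the usual consumption pattern is to sample it twice and turn the idle/wall deltas into a busy percentage. A hypothetical governor-side sketch (the foo_* name is illustrative, not part of the patch):

	#include <linux/cpufreq.h>
	#include <linux/math64.h>

	static unsigned int foo_cpu_load(unsigned int cpu, u64 *prev_idle,
					 u64 *prev_wall)
	{
		u64 wall;
		u64 idle = get_cpu_idle_time(cpu, &wall, 0); /* iowait counts as idle */
		u64 idle_delta = idle - *prev_idle;
		u64 wall_delta = wall - *prev_wall;

		*prev_idle = idle;
		*prev_wall = wall;

		if (!wall_delta || wall_delta < idle_delta)
			return 0;

		/* Percentage of the sampling window the CPU spent busy. */
		return div64_u64(100 * (wall_delta - idle_delta), wall_delta);
	}
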
cpufreq_cpu_put_sysfs(struct cpufreq_policy *data) */ #ifndef CONFIG_SMP static unsigned long l_p_j_ref; -static unsigned int l_p_j_ref_freq; +static unsigned int l_p_j_ref_freq; static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) { @@ -230,15 +261,14 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) if (!l_p_j_ref_freq) { l_p_j_ref = loops_per_jiffy; l_p_j_ref_freq = ci->old; - pr_debug("saving %lu as reference value for loops_per_jiffy; " - "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq); + pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n", + l_p_j_ref, l_p_j_ref_freq); } - if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) || - (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) { + if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) { loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, ci->new); - pr_debug("scaling loops_per_jiffy to %lu " - "for frequency %u kHz\n", loops_per_jiffy, ci->new); + pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n", + loops_per_jiffy, ci->new); } } #else @@ -248,8 +278,7 @@ static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) } #endif - -void __cpufreq_notify_transition(struct cpufreq_policy *policy, +static void __cpufreq_notify_transition(struct cpufreq_policy *policy, struct cpufreq_freqs *freqs, unsigned int state) { BUG_ON(irqs_disabled()); @@ -259,7 +288,7 @@ void __cpufreq_notify_transition(struct cpufreq_policy *policy, freqs->flags = cpufreq_driver->flags; pr_debug("notification %u of frequency transition to %u kHz\n", - state, freqs->new); + state, freqs->new); switch (state) { @@ -271,9 +300,8 @@ void __cpufreq_notify_transition(struct cpufreq_policy *policy, if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { if ((policy) && (policy->cpu == freqs->cpu) && (policy->cur) && (policy->cur != freqs->old)) { - pr_debug("Warning: CPU frequency is" - " %u, cpufreq assumed %u kHz.\n", - freqs->old, policy->cur); + pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n", + freqs->old, policy->cur); freqs->old = policy->cur; } } @@ -284,8 +312,8 @@ void __cpufreq_notify_transition(struct cpufreq_policy *policy, case CPUFREQ_POSTCHANGE: adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); - pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new, - (unsigned long)freqs->cpu); + pr_debug("FREQ: %lu - CPU: %lu\n", + (unsigned long)freqs->new, (unsigned long)freqs->cpu); trace_cpu_frequency(freqs->new, freqs->cpu); srcu_notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_POSTCHANGE, freqs); @@ -294,6 +322,7 @@ void __cpufreq_notify_transition(struct cpufreq_policy *policy, break; } } + /** * cpufreq_notify_transition - call notifier chain and adjust_jiffies * on frequency transition. @@ -302,19 +331,106 @@ void __cpufreq_notify_transition(struct cpufreq_policy *policy, * function. It is called twice on all CPU frequency changes that have * external effects. 
*/ -void cpufreq_notify_transition(struct cpufreq_policy *policy, +static void cpufreq_notify_transition(struct cpufreq_policy *policy, struct cpufreq_freqs *freqs, unsigned int state) { for_each_cpu(freqs->cpu, policy->cpus) __cpufreq_notify_transition(policy, freqs, state); } -EXPORT_SYMBOL_GPL(cpufreq_notify_transition); +/* Do post notifications when there are chances that transition has failed */ +static void cpufreq_notify_post_transition(struct cpufreq_policy *policy, + struct cpufreq_freqs *freqs, int transition_failed) +{ + cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE); + if (!transition_failed) + return; + + swap(freqs->old, freqs->new); + cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE); +} + +void cpufreq_freq_transition_begin(struct cpufreq_policy *policy, + struct cpufreq_freqs *freqs) +{ + + /* + * Catch double invocations of _begin() which lead to self-deadlock. + * ASYNC_NOTIFICATION drivers are left out because the cpufreq core + * doesn't invoke _begin() on their behalf, and hence the chances of + * double invocations are very low. Moreover, there are scenarios + * where these checks can emit false-positive warnings in these + * drivers; so we avoid that by skipping them altogether. + */ + WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION) + && current == policy->transition_task); + +wait: + wait_event(policy->transition_wait, !policy->transition_ongoing); + + spin_lock(&policy->transition_lock); + + if (unlikely(policy->transition_ongoing)) { + spin_unlock(&policy->transition_lock); + goto wait; + } + + policy->transition_ongoing = true; + policy->transition_task = current; + + spin_unlock(&policy->transition_lock); + + cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE); +} +EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin); + +void cpufreq_freq_transition_end(struct cpufreq_policy *policy, + struct cpufreq_freqs *freqs, int transition_failed) +{ + if (unlikely(WARN_ON(!policy->transition_ongoing))) + return; + + cpufreq_notify_post_transition(policy, freqs, transition_failed); + + policy->transition_ongoing = false; + policy->transition_task = NULL; + + wake_up(&policy->transition_wait); +} +EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end); /********************************************************************* * SYSFS INTERFACE * *********************************************************************/ +static ssize_t show_boost(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled); +} + +static ssize_t store_boost(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + int ret, enable; + + ret = sscanf(buf, "%d", &enable); + if (ret != 1 || enable < 0 || enable > 1) + return -EINVAL; + + if (cpufreq_boost_trigger_state(enable)) { + pr_err("%s: Cannot %s BOOST!\n", + __func__, enable ? "enable" : "disable"); + return -EINVAL; + } + + pr_debug("%s: cpufreq BOOST %s\n", + __func__, enable ? 
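
For drivers that still implement ->target(), the contract of the new pair is strict: every _begin() must be matched by an _end(), with the last argument reporting whether the hardware write failed so the core can emit the reversed notification sequence. A hypothetical synchronous driver would follow the same shape as the nforce2 conversion above (foo_* names are illustrative):

	static int foo_target(struct cpufreq_policy *policy,
			      unsigned int target_freq, unsigned int relation)
	{
		struct cpufreq_freqs freqs;
		int ret;

		freqs.old = policy->cur;
		freqs.new = target_freq;	/* relation handling elided */

		/* Blocks until any transition started elsewhere has completed. */
		cpufreq_freq_transition_begin(policy, &freqs);
		ret = foo_write_pll(target_freq);	/* hypothetical h/w write */
		/* A non-zero ret makes the core send reversed notifications. */
		cpufreq_freq_transition_end(policy, &freqs, ret);

		return ret;
	}
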
"enabled" : "disabled"); + + return count; +} +define_one_global_rw(boost); static struct cpufreq_governor *__find_governor(const char *str_governor) { @@ -347,7 +463,7 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, *policy = CPUFREQ_POLICY_POWERSAVE; err = 0; } - } else if (cpufreq_driver->target) { + } else if (has_target()) { struct cpufreq_governor *t; mutex_lock(&cpufreq_governor_mutex); @@ -376,7 +492,6 @@ out: return err; } - /** * cpufreq_per_cpu_attr_read() / show_##file_name() - * print out cpufreq information @@ -399,8 +514,8 @@ show_one(scaling_min_freq, min); show_one(scaling_max_freq, max); show_one(scaling_cur_freq, cur); -static int __cpufreq_set_policy(struct cpufreq_policy *data, - struct cpufreq_policy *policy); +static int cpufreq_set_policy(struct cpufreq_policy *policy, + struct cpufreq_policy *new_policy); /** * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access @@ -409,7 +524,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, static ssize_t store_##file_name \ (struct cpufreq_policy *policy, const char *buf, size_t count) \ { \ - unsigned int ret; \ + int ret; \ struct cpufreq_policy new_policy; \ \ ret = cpufreq_get_policy(&new_policy, policy->cpu); \ @@ -420,7 +535,7 @@ static ssize_t store_##file_name \ if (ret != 1) \ return -EINVAL; \ \ - ret = __cpufreq_set_policy(policy, &new_policy); \ + ret = cpufreq_set_policy(policy, &new_policy); \ policy->user_policy.object = policy->object; \ \ return ret ? ret : count; \ @@ -441,7 +556,6 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, return sprintf(buf, "%u\n", cur_freq); } - /** * show_scaling_governor - show the current policy for the specified CPU */ @@ -457,14 +571,13 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf) return -EINVAL; } - /** * store_scaling_governor - store policy for the specified CPU */ static ssize_t store_scaling_governor(struct cpufreq_policy *policy, const char *buf, size_t count) { - unsigned int ret; + int ret; char str_governor[16]; struct cpufreq_policy new_policy; @@ -480,9 +593,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy, &new_policy.governor)) return -EINVAL; - /* Do not use cpufreq_set_policy here or the user_policy.max - will be wrongly overridden */ - ret = __cpufreq_set_policy(policy, &new_policy); + ret = cpufreq_set_policy(policy, &new_policy); policy->user_policy.policy = policy->policy; policy->user_policy.governor = policy->governor; @@ -510,7 +621,7 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy, ssize_t i = 0; struct cpufreq_governor *t; - if (!cpufreq_driver->target) { + if (!has_target()) { i += sprintf(buf, "performance powersave"); goto out; } @@ -526,7 +637,7 @@ out: return i; } -static ssize_t show_cpus(const struct cpumask *mask, char *buf) +ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf) { ssize_t i = 0; unsigned int cpu; @@ -541,6 +652,7 @@ static ssize_t show_cpus(const struct cpumask *mask, char *buf) i += sprintf(&buf[i], "\n"); return i; } +EXPORT_SYMBOL_GPL(cpufreq_show_cpus); /** * show_related_cpus - show the CPUs affected by each transition even if @@ -548,7 +660,7 @@ static ssize_t show_cpus(const struct cpumask *mask, char *buf) */ static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf) { - return show_cpus(policy->related_cpus, buf); + return cpufreq_show_cpus(policy->related_cpus, buf); } /** @@ -556,7 +668,7 @@ static ssize_t 
show_related_cpus(struct cpufreq_policy *policy, char *buf) */ static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf) { - return show_cpus(policy->cpus, buf); + return cpufreq_show_cpus(policy->cpus, buf); } static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy, @@ -630,9 +742,6 @@ static struct attribute *default_attrs[] = { NULL }; -struct kobject *cpufreq_global_kobject; -EXPORT_SYMBOL(cpufreq_global_kobject); - #define to_policy(k) container_of(k, struct cpufreq_policy, kobj) #define to_attr(a) container_of(a, struct freq_attr, attr) @@ -640,23 +749,21 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) { struct cpufreq_policy *policy = to_policy(kobj); struct freq_attr *fattr = to_attr(attr); - ssize_t ret = -EINVAL; - policy = cpufreq_cpu_get_sysfs(policy->cpu); - if (!policy) - goto no_policy; + ssize_t ret; + + if (!down_read_trylock(&cpufreq_rwsem)) + return -EINVAL; - if (lock_policy_rwsem_read(policy->cpu) < 0) - goto fail; + down_read(&policy->rwsem); if (fattr->show) ret = fattr->show(policy, buf); else ret = -EIO; - unlock_policy_rwsem_read(policy->cpu); -fail: - cpufreq_cpu_put_sysfs(policy); -no_policy: + up_read(&policy->rwsem); + up_read(&cpufreq_rwsem); + return ret; } @@ -666,22 +773,28 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr, struct cpufreq_policy *policy = to_policy(kobj); struct freq_attr *fattr = to_attr(attr); ssize_t ret = -EINVAL; - policy = cpufreq_cpu_get_sysfs(policy->cpu); - if (!policy) - goto no_policy; - if (lock_policy_rwsem_write(policy->cpu) < 0) - goto fail; + get_online_cpus(); + + if (!cpu_online(policy->cpu)) + goto unlock; + + if (!down_read_trylock(&cpufreq_rwsem)) + goto unlock; + + down_write(&policy->rwsem); if (fattr->store) ret = fattr->store(policy, buf, count); else ret = -EIO; - unlock_policy_rwsem_write(policy->cpu); -fail: - cpufreq_cpu_put_sysfs(policy); -no_policy: + up_write(&policy->rwsem); + + up_read(&cpufreq_rwsem); +unlock: + put_online_cpus(); + return ret; } @@ -703,42 +816,76 @@ static struct kobj_type ktype_cpufreq = { .release = cpufreq_sysfs_release, }; +struct kobject *cpufreq_global_kobject; +EXPORT_SYMBOL(cpufreq_global_kobject); + +static int cpufreq_global_kobject_usage; + +int cpufreq_get_global_kobject(void) +{ + if (!cpufreq_global_kobject_usage++) + return kobject_add(cpufreq_global_kobject, + &cpu_subsys.dev_root->kobj, "%s", "cpufreq"); + + return 0; +} +EXPORT_SYMBOL(cpufreq_get_global_kobject); + +void cpufreq_put_global_kobject(void) +{ + if (!--cpufreq_global_kobject_usage) + kobject_del(cpufreq_global_kobject); +} +EXPORT_SYMBOL(cpufreq_put_global_kobject); + +int cpufreq_sysfs_create_file(const struct attribute *attr) +{ + int ret = cpufreq_get_global_kobject(); + + if (!ret) { + ret = sysfs_create_file(cpufreq_global_kobject, attr); + if (ret) + cpufreq_put_global_kobject(); + } + + return ret; +} +EXPORT_SYMBOL(cpufreq_sysfs_create_file); + +void cpufreq_sysfs_remove_file(const struct attribute *attr) +{ + sysfs_remove_file(cpufreq_global_kobject, attr); + cpufreq_put_global_kobject(); +} +EXPORT_SYMBOL(cpufreq_sysfs_remove_file); + /* symlink affected CPUs */ -static int cpufreq_add_dev_symlink(unsigned int cpu, - struct cpufreq_policy *policy) +static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy) { unsigned int j; int ret = 0; for_each_cpu(j, policy->cpus) { - struct cpufreq_policy *managed_policy; struct device *cpu_dev; - if (j == cpu) + if (j == policy->cpu) continue; - pr_debug("CPU %u already 
managed, adding link\n", j); - managed_policy = cpufreq_cpu_get(cpu); + pr_debug("Adding link for CPU: %u\n", j); cpu_dev = get_cpu_device(j); ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq"); - if (ret) { - cpufreq_cpu_put(managed_policy); - return ret; - } + if (ret) + break; } return ret; } -static int cpufreq_add_dev_interface(unsigned int cpu, - struct cpufreq_policy *policy, +static int cpufreq_add_dev_interface(struct cpufreq_policy *policy, struct device *dev) { - struct cpufreq_policy new_policy; struct freq_attr **drv_attr; - unsigned long flags; int ret = 0; - unsigned int j; /* prepare interface data */ ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, @@ -759,7 +906,7 @@ static int cpufreq_add_dev_interface(unsigned int cpu, if (ret) goto err_out_kobj_put; } - if (cpufreq_driver->target) { + if (has_target()) { ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); if (ret) goto err_out_kobj_put; @@ -770,97 +917,190 @@ static int cpufreq_add_dev_interface(unsigned int cpu, goto err_out_kobj_put; } - write_lock_irqsave(&cpufreq_driver_lock, flags); - for_each_cpu(j, policy->cpus) { - per_cpu(cpufreq_cpu_data, j) = policy; - per_cpu(cpufreq_policy_cpu, j) = policy->cpu; - } - write_unlock_irqrestore(&cpufreq_driver_lock, flags); - - ret = cpufreq_add_dev_symlink(cpu, policy); + ret = cpufreq_add_dev_symlink(policy); if (ret) goto err_out_kobj_put; - memcpy(&new_policy, policy, sizeof(struct cpufreq_policy)); - /* assure that the starting sequence is run in __cpufreq_set_policy */ - policy->governor = NULL; + return ret; - /* set default policy */ - ret = __cpufreq_set_policy(policy, &new_policy); - policy->user_policy.policy = policy->policy; - policy->user_policy.governor = policy->governor; +err_out_kobj_put: + kobject_put(&policy->kobj); + wait_for_completion(&policy->kobj_unregister); + return ret; +} + +static void cpufreq_init_policy(struct cpufreq_policy *policy) +{ + struct cpufreq_governor *gov = NULL; + struct cpufreq_policy new_policy; + int ret = 0; + + memcpy(&new_policy, policy, sizeof(*policy)); + /* Update governor of new_policy to the governor used before hotplug */ + gov = __find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu)); + if (gov) + pr_debug("Restoring governor %s for cpu %d\n", + gov->name, policy->cpu); + else + gov = CPUFREQ_DEFAULT_GOVERNOR; + + new_policy.governor = gov; + + /* Use the default policy if it's valid. 
*/ + if (cpufreq_driver->setpolicy) + cpufreq_parse_governor(gov->name, &new_policy.policy, NULL); + + /* set default policy */ + ret = cpufreq_set_policy(policy, &new_policy); if (ret) { pr_debug("setting policy failed\n"); if (cpufreq_driver->exit) cpufreq_driver->exit(policy); } - return ret; - -err_out_kobj_put: - kobject_put(&policy->kobj); - wait_for_completion(&policy->kobj_unregister); - return ret; } #ifdef CONFIG_HOTPLUG_CPU -static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling, - struct device *dev) +static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, + unsigned int cpu, struct device *dev) { - struct cpufreq_policy *policy; - int ret = 0, has_target = !!cpufreq_driver->target; + int ret = 0; unsigned long flags; - policy = cpufreq_cpu_get(sibling); - WARN_ON(!policy); - - if (has_target) - __cpufreq_governor(policy, CPUFREQ_GOV_STOP); + if (has_target()) { + ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); + if (ret) { + pr_err("%s: Failed to stop governor\n", __func__); + return ret; + } + } - lock_policy_rwsem_write(sibling); + down_write(&policy->rwsem); write_lock_irqsave(&cpufreq_driver_lock, flags); cpumask_set_cpu(cpu, policy->cpus); - per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu; per_cpu(cpufreq_cpu_data, cpu) = policy; write_unlock_irqrestore(&cpufreq_driver_lock, flags); - unlock_policy_rwsem_write(sibling); + up_write(&policy->rwsem); - if (has_target) { - __cpufreq_governor(policy, CPUFREQ_GOV_START); - __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); - } + if (has_target()) { + ret = __cpufreq_governor(policy, CPUFREQ_GOV_START); + if (!ret) + ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); - ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); - if (ret) { - cpufreq_cpu_put(policy); - return ret; + if (ret) { + pr_err("%s: Failed to start governor\n", __func__); + return ret; + } } - return 0; + return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); } #endif -/** - * cpufreq_add_dev - add a CPU device - * - * Adds the cpufreq interface for a CPU device. - * - * The Oracle says: try running cpufreq registration/unregistration concurrently - * with with cpu hotplugging and all hell will break loose. Tried to clean this - * mess up, but more thorough testing is needed. 
- Mathieu - */ -static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) +static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu) +{ + struct cpufreq_policy *policy; + unsigned long flags; + + read_lock_irqsave(&cpufreq_driver_lock, flags); + + policy = per_cpu(cpufreq_cpu_data_fallback, cpu); + + read_unlock_irqrestore(&cpufreq_driver_lock, flags); + + policy->governor = NULL; + + return policy; +} + +static struct cpufreq_policy *cpufreq_policy_alloc(void) +{ + struct cpufreq_policy *policy; + + policy = kzalloc(sizeof(*policy), GFP_KERNEL); + if (!policy) + return NULL; + + if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) + goto err_free_policy; + + if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) + goto err_free_cpumask; + + INIT_LIST_HEAD(&policy->policy_list); + init_rwsem(&policy->rwsem); + spin_lock_init(&policy->transition_lock); + init_waitqueue_head(&policy->transition_wait); + + return policy; + +err_free_cpumask: + free_cpumask_var(policy->cpus); +err_free_policy: + kfree(policy); + + return NULL; +} + +static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy) +{ + struct kobject *kobj; + struct completion *cmp; + + blocking_notifier_call_chain(&cpufreq_policy_notifier_list, + CPUFREQ_REMOVE_POLICY, policy); + + down_read(&policy->rwsem); + kobj = &policy->kobj; + cmp = &policy->kobj_unregister; + up_read(&policy->rwsem); + kobject_put(kobj); + + /* + * We need to make sure that the underlying kobj is + * actually not referenced anymore by anybody before we + * proceed with unloading. + */ + pr_debug("waiting for dropping of refcount\n"); + wait_for_completion(cmp); + pr_debug("wait complete\n"); +} + +static void cpufreq_policy_free(struct cpufreq_policy *policy) +{ + free_cpumask_var(policy->related_cpus); + free_cpumask_var(policy->cpus); + kfree(policy); +} + +static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu) +{ + if (WARN_ON(cpu == policy->cpu)) + return; + + down_write(&policy->rwsem); + + policy->last_cpu = policy->cpu; + policy->cpu = cpu; + + up_write(&policy->rwsem); + + blocking_notifier_call_chain(&cpufreq_policy_notifier_list, + CPUFREQ_UPDATE_POLICY_CPU, policy); +} + +static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) { unsigned int j, cpu = dev->id; int ret = -ENOMEM; struct cpufreq_policy *policy; unsigned long flags; + bool recover_policy = cpufreq_suspended; #ifdef CONFIG_HOTPLUG_CPU - struct cpufreq_governor *gov; - int sibling; + struct cpufreq_policy *tpolicy; #endif if (cpu_is_offline(cpu)) @@ -876,43 +1116,52 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) cpufreq_cpu_put(policy); return 0; } +#endif + + if (!down_read_trylock(&cpufreq_rwsem)) + return 0; #ifdef CONFIG_HOTPLUG_CPU /* Check if this cpu was hot-unplugged earlier and has siblings */ read_lock_irqsave(&cpufreq_driver_lock, flags); - for_each_online_cpu(sibling) { - struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling); - if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) { + list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) { + if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) { read_unlock_irqrestore(&cpufreq_driver_lock, flags); - return cpufreq_add_policy_cpu(cpu, sibling, dev); + ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev); + up_read(&cpufreq_rwsem); + return ret; } } read_unlock_irqrestore(&cpufreq_driver_lock, flags); #endif -#endif - if (!try_module_get(cpufreq_driver->owner)) { - ret = -EINVAL; - goto module_out; 
+ /* + * Restore the saved policy when doing light-weight init and fall back + * to the full init if that fails. + */ + policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL; + if (!policy) { + recover_policy = false; + policy = cpufreq_policy_alloc(); + if (!policy) + goto nomem_out; } - policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL); - if (!policy) - goto nomem_out; - - if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) - goto err_free_policy; - - if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) - goto err_free_cpumask; + /* + * In the resume path, since we restore a saved policy, the assignment + * to policy->cpu is like an update of the existing policy, rather than + * the creation of a brand new one. So we need to perform this update + * by invoking update_policy_cpu(). + */ + if (recover_policy && cpu != policy->cpu) { + update_policy_cpu(policy, cpu); + WARN_ON(kobject_move(&policy->kobj, &dev->kobj)); + } else { + policy->cpu = cpu; + } - policy->cpu = cpu; - policy->governor = CPUFREQ_DEFAULT_GOVERNOR; cpumask_copy(policy->cpus, cpumask_of(cpu)); - /* Initially set CPU itself as the policy_cpu */ - per_cpu(cpufreq_policy_cpu, cpu) = cpu; - init_completion(&policy->kobj_unregister); INIT_WORK(&policy->update, handle_update); @@ -934,197 +1183,307 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) */ cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); - policy->user_policy.min = policy->min; - policy->user_policy.max = policy->max; + if (!recover_policy) { + policy->user_policy.min = policy->min; + policy->user_policy.max = policy->max; + } + + down_write(&policy->rwsem); + write_lock_irqsave(&cpufreq_driver_lock, flags); + for_each_cpu(j, policy->cpus) + per_cpu(cpufreq_cpu_data, j) = policy; + write_unlock_irqrestore(&cpufreq_driver_lock, flags); + + if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { + policy->cur = cpufreq_driver->get(policy->cpu); + if (!policy->cur) { + pr_err("%s: ->get() failed\n", __func__); + goto err_get_freq; + } + } + + /* + * Sometimes boot loaders set the CPU frequency to a value outside of + * the frequency table present with the cpufreq core. In such cases the + * CPU might be unstable if it has to run at that frequency for a long + * duration, so it's better to set it to a frequency which is specified + * in the freq-table. This also makes cpufreq stats inconsistent, as + * cpufreq-stats would fail to register because the current frequency + * of the CPU isn't found in the freq-table. + * + * Because we don't want this change to affect the boot process badly, + * we go for the next freq which is >= policy->cur ('cur' must be set + * by now, otherwise we will end up setting freq to the lowest entry of + * the table as 'cur' is initialized to zero). + * + * We are passing target-freq as "policy->cur - 1" because otherwise + * __cpufreq_driver_target() would simply fail, as policy->cur will be + * equal to target-freq. + */ + if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK) + && has_target()) { + /* Are we running at an unknown frequency? */ + ret = cpufreq_frequency_table_get_index(policy, policy->cur); + if (ret == -EINVAL) { + /* Warn the user and fix it */ + pr_warn("%s: CPU%d: Running at unlisted freq: %u kHz\n", + __func__, policy->cpu, policy->cur); + ret = __cpufreq_driver_target(policy, policy->cur - 1, + CPUFREQ_RELATION_L); + + /* + * Reaching here after boot in a few seconds may not + * mean that the system will remain stable at the + * "unknown" frequency for a longer duration. Hence, + * a BUG_ON(). + */ + BUG_ON(ret); + pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u kHz\n", + __func__, policy->cpu, policy->cur); + } + } blocking_notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_START, policy); -#ifdef CONFIG_HOTPLUG_CPU - gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu)); - if (gov) { - policy->governor = gov; - pr_debug("Restoring governor %s for cpu %d\n", - policy->governor->name, cpu); + if (!recover_policy) { + ret = cpufreq_add_dev_interface(policy, dev); + if (ret) + goto err_out_unregister; + blocking_notifier_call_chain(&cpufreq_policy_notifier_list, + CPUFREQ_CREATE_POLICY, policy); } -#endif - ret = cpufreq_add_dev_interface(cpu, policy, dev); - if (ret) - goto err_out_unregister; + write_lock_irqsave(&cpufreq_driver_lock, flags); + list_add(&policy->policy_list, &cpufreq_policy_list); + write_unlock_irqrestore(&cpufreq_driver_lock, flags); + + cpufreq_init_policy(policy); + + if (!recover_policy) { + policy->user_policy.policy = policy->policy; + policy->user_policy.governor = policy->governor; + } + up_write(&policy->rwsem); kobject_uevent(&policy->kobj, KOBJ_ADD); - module_put(cpufreq_driver->owner); + up_read(&cpufreq_rwsem); + pr_debug("initialization complete\n"); return 0; err_out_unregister: +err_get_freq: write_lock_irqsave(&cpufreq_driver_lock, flags); for_each_cpu(j, policy->cpus) per_cpu(cpufreq_cpu_data, j) = NULL; write_unlock_irqrestore(&cpufreq_driver_lock, flags); - kobject_put(&policy->kobj); - wait_for_completion(&policy->kobj_unregister); - + if (cpufreq_driver->exit) + cpufreq_driver->exit(policy); err_set_policy_cpu: - per_cpu(cpufreq_policy_cpu, cpu) = -1; - free_cpumask_var(policy->related_cpus); -err_free_cpumask: - free_cpumask_var(policy->cpus); -err_free_policy: - kfree(policy); + if (recover_policy) { + /* Do not leave stale fallback data behind. */ + per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL; + cpufreq_policy_put_kobj(policy); + } + cpufreq_policy_free(policy); + nomem_out: - module_put(cpufreq_driver->owner); -module_out: + up_read(&cpufreq_rwsem); + return ret; } -static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu) +/** + * cpufreq_add_dev - add a CPU device + * + * Adds the cpufreq interface for a CPU device. + * + * The Oracle says: try running cpufreq registration/unregistration concurrently + * with cpu hotplugging and all hell will break loose. Tried to clean this + * mess up, but more thorough testing is needed. 
- Mathieu + */ +static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) +{ + return __cpufreq_add_dev(dev, sif); +} + +static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy, + unsigned int old_cpu) { - int j; + struct device *cpu_dev; + int ret; - policy->last_cpu = policy->cpu; - policy->cpu = cpu; + /* first sibling now owns the new sysfs dir */ + cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu)); - for_each_cpu(j, policy->cpus) - per_cpu(cpufreq_policy_cpu, j) = cpu; + sysfs_remove_link(&cpu_dev->kobj, "cpufreq"); + ret = kobject_move(&policy->kobj, &cpu_dev->kobj); + if (ret) { + pr_err("%s: Failed to move kobj: %d\n", __func__, ret); -#ifdef CONFIG_CPU_FREQ_TABLE - cpufreq_frequency_table_update_policy_cpu(policy); -#endif - blocking_notifier_call_chain(&cpufreq_policy_notifier_list, - CPUFREQ_UPDATE_POLICY_CPU, policy); + down_write(&policy->rwsem); + cpumask_set_cpu(old_cpu, policy->cpus); + up_write(&policy->rwsem); + + ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj, + "cpufreq"); + + return -EINVAL; + } + + return cpu_dev->id; } -/** - * __cpufreq_remove_dev - remove a CPU device - * - * Removes the cpufreq interface for a CPU device. - * Caller should already have policy_rwsem in write mode for this CPU. - * This routine frees the rwsem before returning. - */ -static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) +static int __cpufreq_remove_dev_prepare(struct device *dev, + struct subsys_interface *sif) { - unsigned int cpu = dev->id, ret, cpus; + unsigned int cpu = dev->id, cpus; + int new_cpu, ret; unsigned long flags; - struct cpufreq_policy *data; - struct kobject *kobj; - struct completion *cmp; - struct device *cpu_dev; + struct cpufreq_policy *policy; pr_debug("%s: unregistering CPU %u\n", __func__, cpu); write_lock_irqsave(&cpufreq_driver_lock, flags); - data = per_cpu(cpufreq_cpu_data, cpu); - per_cpu(cpufreq_cpu_data, cpu) = NULL; + policy = per_cpu(cpufreq_cpu_data, cpu); + + /* Save the policy somewhere when doing a light-weight tear-down */ + if (cpufreq_suspended) + per_cpu(cpufreq_cpu_data_fallback, cpu) = policy; write_unlock_irqrestore(&cpufreq_driver_lock, flags); - if (!data) { + if (!policy) { pr_debug("%s: No cpu_data found\n", __func__); return -EINVAL; } - if (cpufreq_driver->target) - __cpufreq_governor(data, CPUFREQ_GOV_STOP); + if (has_target()) { + ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); + if (ret) { + pr_err("%s: Failed to stop governor\n", __func__); + return ret; + } + } -#ifdef CONFIG_HOTPLUG_CPU if (!cpufreq_driver->setpolicy) strncpy(per_cpu(cpufreq_cpu_governor, cpu), - data->governor->name, CPUFREQ_NAME_LEN); -#endif + policy->governor->name, CPUFREQ_NAME_LEN); - WARN_ON(lock_policy_rwsem_write(cpu)); - cpus = cpumask_weight(data->cpus); + down_read(&policy->rwsem); + cpus = cpumask_weight(policy->cpus); + up_read(&policy->rwsem); - if (cpus > 1) - cpumask_clear_cpu(cpu, data->cpus); - unlock_policy_rwsem_write(cpu); - - if (cpu != data->cpu) { + if (cpu != policy->cpu) { sysfs_remove_link(&dev->kobj, "cpufreq"); } else if (cpus > 1) { - /* first sibling now owns the new sysfs dir */ - cpu_dev = get_cpu_device(cpumask_first(data->cpus)); - sysfs_remove_link(&cpu_dev->kobj, "cpufreq"); - ret = kobject_move(&data->kobj, &cpu_dev->kobj); - if (ret) { - pr_err("%s: Failed to move kobj: %d", __func__, ret); + new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu); + if (new_cpu >= 0) { + update_policy_cpu(policy, new_cpu); - 
WARN_ON(lock_policy_rwsem_write(cpu)); - cpumask_set_cpu(cpu, data->cpus); + if (!cpufreq_suspended) + pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n", + __func__, new_cpu, cpu); + } + } else if (cpufreq_driver->stop_cpu && cpufreq_driver->setpolicy) { + cpufreq_driver->stop_cpu(policy); + } - write_lock_irqsave(&cpufreq_driver_lock, flags); - per_cpu(cpufreq_cpu_data, cpu) = data; - write_unlock_irqrestore(&cpufreq_driver_lock, flags); + return 0; +} - unlock_policy_rwsem_write(cpu); +static int __cpufreq_remove_dev_finish(struct device *dev, + struct subsys_interface *sif) +{ + unsigned int cpu = dev->id, cpus; + int ret; + unsigned long flags; + struct cpufreq_policy *policy; - ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj, - "cpufreq"); - return -EINVAL; - } + read_lock_irqsave(&cpufreq_driver_lock, flags); + policy = per_cpu(cpufreq_cpu_data, cpu); + read_unlock_irqrestore(&cpufreq_driver_lock, flags); - WARN_ON(lock_policy_rwsem_write(cpu)); - update_policy_cpu(data, cpu_dev->id); - unlock_policy_rwsem_write(cpu); - pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n", - __func__, cpu_dev->id, cpu); + if (!policy) { + pr_debug("%s: No cpu_data found\n", __func__); + return -EINVAL; } - if ((cpus == 1) && (cpufreq_driver->target)) - __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT); + down_write(&policy->rwsem); + cpus = cpumask_weight(policy->cpus); - pr_debug("%s: removing link, cpu: %d\n", __func__, cpu); - cpufreq_cpu_put(data); + if (cpus > 1) + cpumask_clear_cpu(cpu, policy->cpus); + up_write(&policy->rwsem); /* If cpu is last user of policy, free policy */ if (cpus == 1) { - lock_policy_rwsem_read(cpu); - kobj = &data->kobj; - cmp = &data->kobj_unregister; - unlock_policy_rwsem_read(cpu); - kobject_put(kobj); - - /* we need to make sure that the underlying kobj is actually - * not referenced anymore by anybody before we proceed with - * unloading. - */ - pr_debug("waiting for dropping of refcount\n"); - wait_for_completion(cmp); - pr_debug("wait complete\n"); + if (has_target()) { + ret = __cpufreq_governor(policy, + CPUFREQ_GOV_POLICY_EXIT); + if (ret) { + pr_err("%s: Failed to exit governor\n", + __func__); + return ret; + } + } + + if (!cpufreq_suspended) + cpufreq_policy_put_kobj(policy); + /* + * Perform the ->exit() even during light-weight tear-down, + * since this is a core component, and is essential for the + * subsequent light-weight ->init() to succeed. + */ if (cpufreq_driver->exit) - cpufreq_driver->exit(data); + cpufreq_driver->exit(policy); + + /* Remove policy from list of active policies */ + write_lock_irqsave(&cpufreq_driver_lock, flags); + list_del(&policy->policy_list); + write_unlock_irqrestore(&cpufreq_driver_lock, flags); + + if (!cpufreq_suspended) + cpufreq_policy_free(policy); + } else if (has_target()) { + ret = __cpufreq_governor(policy, CPUFREQ_GOV_START); + if (!ret) + ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); - free_cpumask_var(data->related_cpus); - free_cpumask_var(data->cpus); - kfree(data); - } else if (cpufreq_driver->target) { - __cpufreq_governor(data, CPUFREQ_GOV_START); - __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); + if (ret) { + pr_err("%s: Failed to start governor\n", __func__); + return ret; + } } - per_cpu(cpufreq_policy_cpu, cpu) = -1; + per_cpu(cpufreq_cpu_data, cpu) = NULL; return 0; } - +/** + * cpufreq_remove_dev - remove a CPU device + * + * Removes the cpufreq interface for a CPU device. 
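
The prepare/finish split above is meant to be driven from the CPU hotplug notifier, so that only the governor stop happens while the CPU is going down and the heavier kobject/sysfs teardown runs once the CPU is dead. A sketch of the expected call sites, assuming the usual notifier wiring elsewhere in cpufreq.c (not shown in these hunks):

	static int cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
	{
		unsigned int cpu = (unsigned long)hcpu;
		struct device *dev = get_cpu_device(cpu);

		if (dev) {
			switch (action & ~CPU_TASKS_FROZEN) {
			case CPU_ONLINE:
				__cpufreq_add_dev(dev, NULL);
				break;
			case CPU_DOWN_PREPARE:
				__cpufreq_remove_dev_prepare(dev, NULL);
				break;
			case CPU_POST_DEAD:
				/* Deferred so it cannot block the hotplug path. */
				__cpufreq_remove_dev_finish(dev, NULL);
				break;
			case CPU_DOWN_FAILED:
				__cpufreq_add_dev(dev, NULL);
				break;
			}
		}
		return NOTIFY_OK;
	}
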
+ */ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) { unsigned int cpu = dev->id; - int retval; + int ret; if (cpu_is_offline(cpu)) return 0; - retval = __cpufreq_remove_dev(dev, sif); - return retval; -} + ret = __cpufreq_remove_dev_prepare(dev, sif); + + if (!ret) + ret = __cpufreq_remove_dev_finish(dev, sif); + return ret; +} static void handle_update(struct work_struct *work) { @@ -1136,7 +1495,8 @@ static void handle_update(struct work_struct *work) } /** - * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble. + * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're + * in deep trouble. * @cpu: cpu number * @old_freq: CPU frequency the kernel thinks the CPU runs at * @new_freq: CPU frequency the CPU actually runs at @@ -1151,9 +1511,8 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, struct cpufreq_freqs freqs; unsigned long flags; - - pr_debug("Warning: CPU frequency out of sync: cpufreq and timing " - "core thinks of %u, is %u kHz.\n", old_freq, new_freq); + pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n", + old_freq, new_freq); freqs.old = old_freq; freqs.new = new_freq; @@ -1162,11 +1521,10 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, policy = per_cpu(cpufreq_cpu_data, cpu); read_unlock_irqrestore(&cpufreq_driver_lock, flags); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + cpufreq_freq_transition_begin(policy, &freqs); + cpufreq_freq_transition_end(policy, &freqs, 0); } - /** * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur * @cpu: CPU number @@ -1212,7 +1570,6 @@ unsigned int cpufreq_quick_get_max(unsigned int cpu) } EXPORT_SYMBOL(cpufreq_quick_get_max); - static unsigned int __cpufreq_get(unsigned int cpu) { struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); @@ -1244,22 +1601,17 @@ static unsigned int __cpufreq_get(unsigned int cpu) */ unsigned int cpufreq_get(unsigned int cpu) { - unsigned int ret_freq = 0; struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); + unsigned int ret_freq = 0; - if (!policy) - goto out; - - if (unlikely(lock_policy_rwsem_read(cpu))) - goto out_policy; - - ret_freq = __cpufreq_get(cpu); + if (policy) { + down_read(&policy->rwsem); + ret_freq = __cpufreq_get(cpu); + up_read(&policy->rwsem); - unlock_policy_rwsem_read(cpu); + cpufreq_cpu_put(policy); + } -out_policy: - cpufreq_cpu_put(policy); -out: return ret_freq; } EXPORT_SYMBOL(cpufreq_get); @@ -1271,84 +1623,103 @@ static struct subsys_interface cpufreq_interface = { .remove_dev = cpufreq_remove_dev, }; +/* + * In case platform wants some specific frequency to be configured + * during suspend.. + */ +int cpufreq_generic_suspend(struct cpufreq_policy *policy) +{ + int ret; + + if (!policy->suspend_freq) { + pr_err("%s: suspend_freq can't be zero\n", __func__); + return -EINVAL; + } + + pr_debug("%s: Setting suspend-freq: %u\n", __func__, + policy->suspend_freq); + + ret = __cpufreq_driver_target(policy, policy->suspend_freq, + CPUFREQ_RELATION_H); + if (ret) + pr_err("%s: unable to set suspend-freq: %u. err: %d\n", + __func__, policy->suspend_freq, ret); + + return ret; +} +EXPORT_SYMBOL(cpufreq_generic_suspend); /** - * cpufreq_bp_suspend - Prepare the boot CPU for system suspend. + * cpufreq_suspend() - Suspend CPUFreq governors * - * This function is only executed for the boot processor. 
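
A driver opts in to cpufreq_generic_suspend() above by picking a safe frequency at init time and storing it in policy->suspend_freq. A hypothetical wiring (foo_* identifiers are illustrative; the latency value is an assumed 100 us in ns):

	static int foo_cpufreq_init(struct cpufreq_policy *policy)
	{
		int ret = cpufreq_generic_init(policy, foo_freq_table, 100000);

		/* Ask the core to park the CPU at the table's first entry. */
		if (!ret)
			policy->suspend_freq = foo_freq_table[0].frequency;

		return ret;
	}

	static struct cpufreq_driver foo_driver = {
		.name		= "foo-cpufreq",
		.init		= foo_cpufreq_init,
		.verify		= cpufreq_generic_frequency_table_verify,
		.target_index	= foo_target_index,	/* hypothetical */
		.get		= cpufreq_generic_get,
		.suspend	= cpufreq_generic_suspend,
		.attr		= cpufreq_generic_attr,
	};
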
The other CPUs
- * have been put offline by means of CPU hotplug.
+ * Called during system-wide Suspend/Hibernate cycles to suspend governors,
+ * as some platforms can't change frequency after this point in the suspend
+ * cycle, because some of the devices (e.g. i2c, regulators) they use for
+ * changing frequency are suspended soon after this point.
  */
-static int cpufreq_bp_suspend(void)
+void cpufreq_suspend(void)
 {
-	int ret = 0;
+	struct cpufreq_policy *policy;
 
-	int cpu = smp_processor_id();
-	struct cpufreq_policy *cpu_policy;
+	if (!cpufreq_driver)
+		return;
 
-	pr_debug("suspending cpu %u\n", cpu);
+	if (!has_target())
+		return;
 
-	/* If there's no policy for the boot CPU, we have nothing to do. */
-	cpu_policy = cpufreq_cpu_get(cpu);
-	if (!cpu_policy)
-		return 0;
+	pr_debug("%s: Suspending Governors\n", __func__);
 
-	if (cpufreq_driver->suspend) {
-		ret = cpufreq_driver->suspend(cpu_policy);
-		if (ret)
-			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
-					"step on CPU %u\n", cpu_policy->cpu);
+	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
+			pr_err("%s: Failed to stop governor for policy: %p\n",
+				__func__, policy);
+		else if (cpufreq_driver->suspend
+		    && cpufreq_driver->suspend(policy))
+			pr_err("%s: Failed to suspend driver: %p\n", __func__,
+				policy);
 	}
 
-	cpufreq_cpu_put(cpu_policy);
-	return ret;
+	cpufreq_suspended = true;
 }
 
 /**
- * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
+ * cpufreq_resume() - Resume CPUFreq governors
  *
- *	1.) resume CPUfreq hardware support (cpufreq_driver->resume())
- *	2.) schedule call cpufreq_update_policy() ASAP as interrupts are
- *	    restored. It will verify that the current freq is in sync with
- *	    what we believe it to be. This is a bit later than when it
- *	    should be, but nonethteless it's better than calling
- *	    cpufreq_driver->get() here which might re-enable interrupts...
- *
- * This function is only executed for the boot CPU. The other CPUs have not
- * been turned on yet.
+ * Called during system-wide Suspend/Hibernate cycles to resume governors that
+ * were suspended with cpufreq_suspend().
  */
-static void cpufreq_bp_resume(void)
+void cpufreq_resume(void)
 {
-	int ret = 0;
-
-	int cpu = smp_processor_id();
-	struct cpufreq_policy *cpu_policy;
+	struct cpufreq_policy *policy;
 
-	pr_debug("resuming cpu %u\n", cpu);
+	if (!cpufreq_driver)
+		return;
 
-	/* If there's no policy for the boot CPU, we have nothing to do. */
-	cpu_policy = cpufreq_cpu_get(cpu);
-	if (!cpu_policy)
+	if (!has_target())
 		return;
 
-	if (cpufreq_driver->resume) {
-		ret = cpufreq_driver->resume(cpu_policy);
-		if (ret) {
-			printk(KERN_ERR "cpufreq: resume failed in ->resume "
-					"step on CPU %u\n", cpu_policy->cpu);
-			goto fail;
-		}
-	}
+	pr_debug("%s: Resuming Governors\n", __func__);
 
-	schedule_work(&cpu_policy->update);
+	cpufreq_suspended = false;
 
-fail:
-	cpufreq_cpu_put(cpu_policy);
-}
+	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+		if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
+			pr_err("%s: Failed to resume driver: %p\n", __func__,
+				policy);
+		else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
+		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
+			pr_err("%s: Failed to start governor for policy: %p\n",
+				__func__, policy);
 
-static struct syscore_ops cpufreq_syscore_ops = {
-	.suspend	= cpufreq_bp_suspend,
-	.resume		= cpufreq_bp_resume,
-};
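/*
 * Illustrative sketch, not part of this series: a driver opts in to the
 * cpufreq_generic_suspend() helper added above by setting
 * policy->suspend_freq in its ->init() callback and wiring ->suspend up
 * to the helper. The foo_* names, the frequency value and the elided
 * foo_freq_table/foo_target_index are invented for the example; the
 * cpufreq_generic_* helpers are the core's stock table callbacks.
 */
static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	/* clock/regulator setup elided */
	policy->suspend_freq = 800000;	/* kHz, used across suspend */
	return cpufreq_table_validate_and_show(policy, foo_freq_table);
}

static struct cpufreq_driver foo_cpufreq_driver = {
	.name		= "foo-cpufreq",
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= foo_target_index,
	.get		= cpufreq_generic_get,
	.init		= foo_cpufreq_init,
	.suspend	= cpufreq_generic_suspend,
	.attr		= cpufreq_generic_attr,
};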
+		/*
+		 * Schedule a call to cpufreq_update_policy() for the boot
+		 * CPU, i.e. the last policy in the list. It will verify that
+		 * the current freq is in sync with what we believe it to be.
+		 */
+		if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
+			schedule_work(&policy->update);
+	}
+}
 
 /**
  * cpufreq_get_current_driver - return current driver's name
@@ -1408,11 +1779,10 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
 }
 EXPORT_SYMBOL(cpufreq_register_notifier);
 
-
 /**
  * cpufreq_unregister_notifier - unregister a driver with cpufreq
  *	@nb: notifier block to be unregistered
- *	@list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
+ * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
  *
  * Remove a driver from the CPU frequency notifier list.
  *
@@ -1448,13 +1818,92 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
  *                              GOVERNORS                            *
  *********************************************************************/
 
+/* Must set freqs->new to intermediate frequency */
+static int __target_intermediate(struct cpufreq_policy *policy,
+				 struct cpufreq_freqs *freqs, int index)
+{
+	int ret;
+
+	freqs->new = cpufreq_driver->get_intermediate(policy, index);
+
+	/* We don't need to switch to intermediate freq */
+	if (!freqs->new)
+		return 0;
+
+	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
+		 __func__, policy->cpu, freqs->old, freqs->new);
+
+	cpufreq_freq_transition_begin(policy, freqs);
+	ret = cpufreq_driver->target_intermediate(policy, index);
+	cpufreq_freq_transition_end(policy, freqs, ret);
+
+	if (ret)
+		pr_err("%s: Failed to change to intermediate frequency: %d\n",
+		       __func__, ret);
+
+	return ret;
+}
+
+static int __target_index(struct cpufreq_policy *policy,
+			  struct cpufreq_frequency_table *freq_table, int index)
+{
+	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
+	unsigned int intermediate_freq = 0;
+	int retval = -EINVAL;
+	bool notify;
+
+	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
+	if (notify) {
+		/* Handle switching to intermediate frequency */
+		if (cpufreq_driver->get_intermediate) {
+			retval = __target_intermediate(policy, &freqs, index);
+			if (retval)
+				return retval;
+
+			intermediate_freq = freqs.new;
+			/* Set old freq to intermediate */
+			if (intermediate_freq)
+				freqs.old = freqs.new;
+		}
+
+		freqs.new = freq_table[index].frequency;
+		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
+			 __func__, policy->cpu, freqs.old, freqs.new);
+
+		cpufreq_freq_transition_begin(policy, &freqs);
+	}
+
+	retval = cpufreq_driver->target_index(policy, index);
+	if (retval)
+		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
+		       retval);
+
+	if (notify) {
+		cpufreq_freq_transition_end(policy, &freqs, retval);
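/*
 * Illustrative sketch, not from this series: a driver whose PLL must be
 * bypassed at a fixed rate before reprogramming would pair the two new
 * callbacks like this. The foo_* names, FOO_BYPASS_FREQ and the clock
 * handles are invented; clk_set_parent() is the regular clock-framework
 * call. Per the registration check later in this patch, the two hooks
 * must be provided together or not at all.
 */
static struct clk *foo_cpu_clk, *foo_bypass_clk;

static unsigned int foo_get_intermediate(struct cpufreq_policy *policy,
					 unsigned int index)
{
	/* Returning 0 tells the core no intermediate switch is needed. */
	if (foo_freq_table[index].frequency == FOO_BYPASS_FREQ)
		return 0;
	return FOO_BYPASS_FREQ;
}

static int foo_target_intermediate(struct cpufreq_policy *policy,
				   unsigned int index)
{
	/* Re-parent the CPU clock to the bypass source. */
	return clk_set_parent(foo_cpu_clk, foo_bypass_clk);
}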
+
+		/*
+		 * Failed after setting to an intermediate freq? The driver
+		 * should have reverted to the initial frequency, and so
+		 * should we. Check here for intermediate_freq instead of
+		 * get_intermediate, in case we haven't switched to an
+		 * intermediate freq at all.
+		 */
+		if (unlikely(retval && intermediate_freq)) {
+			freqs.old = intermediate_freq;
+			freqs.new = policy->restore_freq;
+			cpufreq_freq_transition_begin(policy, &freqs);
+			cpufreq_freq_transition_end(policy, &freqs, 0);
+		}
+	}
+
+	return retval;
+}
 
 int __cpufreq_driver_target(struct cpufreq_policy *policy,
 			    unsigned int target_freq,
 			    unsigned int relation)
 {
-	int retval = -EINVAL;
 	unsigned int old_target_freq = target_freq;
+	int retval = -EINVAL;
 
 	if (cpufreq_disabled())
 		return -ENODEV;
@@ -1466,14 +1915,48 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 		target_freq = policy->min;
 
 	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
-			policy->cpu, target_freq, relation, old_target_freq);
+		 policy->cpu, target_freq, relation, old_target_freq);
 
+	/*
+	 * This might look like a redundant call, as we are checking it again
+	 * after finding the index. But it is left in intentionally for cases
+	 * where exactly the same frequency is requested again, so that we can
+	 * save a few function calls.
+	 */
 	if (target_freq == policy->cur)
 		return 0;
 
+	/* Save the last value, to restore later on errors */
+	policy->restore_freq = policy->cur;
+
 	if (cpufreq_driver->target)
 		retval = cpufreq_driver->target(policy, target_freq, relation);
+	else if (cpufreq_driver->target_index) {
+		struct cpufreq_frequency_table *freq_table;
+		int index;
+
+		freq_table = cpufreq_frequency_get_table(policy->cpu);
+		if (unlikely(!freq_table)) {
+			pr_err("%s: Unable to find freq_table\n", __func__);
+			goto out;
+		}
+
+		retval = cpufreq_frequency_table_target(policy, freq_table,
+				target_freq, relation, &index);
+		if (unlikely(retval)) {
+			pr_err("%s: Unable to find matching freq\n", __func__);
+			goto out;
+		}
+
+		if (freq_table[index].frequency == policy->cur) {
+			retval = 0;
+			goto out;
+		}
+
+		retval = __target_index(policy, freq_table, index);
+	}
+
+out:
 	return retval;
 }
 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
@@ -1484,45 +1967,16 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
 {
 	int ret = -EINVAL;
 
-	policy = cpufreq_cpu_get(policy->cpu);
-	if (!policy)
-		goto no_policy;
-
-	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
-		goto fail;
+	down_write(&policy->rwsem);
 
 	ret = __cpufreq_driver_target(policy, target_freq, relation);
 
-	unlock_policy_rwsem_write(policy->cpu);
+	up_write(&policy->rwsem);
 
-fail:
-	cpufreq_cpu_put(policy);
-no_policy:
 	return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
 
-int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
-{
-	int ret = 0;
-
-	if (cpufreq_disabled())
-		return ret;
-
-	if (!cpufreq_driver->getavg)
-		return 0;
-
-	policy = cpufreq_cpu_get(policy->cpu);
-	if (!policy)
-		return -EINVAL;
-
-	ret = cpufreq_driver->getavg(policy, cpu);
-
-	cpufreq_cpu_put(policy);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
-
 /*
  * when "event" is CPUFREQ_GOV_LIMITS
  */
@@ -1542,26 +1996,44 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
 	struct cpufreq_governor *gov = NULL;
 #endif
 
+	/* Don't start any governor operations if we are entering suspend */
+	if (cpufreq_suspended)
+		return 0;
+
 	if (policy->governor->max_transition_latency &&
 	    policy->cpuinfo.transition_latency >
 	    policy->governor->max_transition_latency) {
 		if (!gov)
 			return -EINVAL;
 		else {
-			printk(KERN_WARNING "%s governor failed, too long"
-			       " transition latency of HW, fallback"
-			       " to %s governor\n",
-			       policy->governor->name,
-			       gov->name);
+			pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
				policy->governor->name,
gov->name); policy->governor = gov; } } - if (!try_module_get(policy->governor->owner)) - return -EINVAL; + if (event == CPUFREQ_GOV_POLICY_INIT) + if (!try_module_get(policy->governor->owner)) + return -EINVAL; pr_debug("__cpufreq_governor for CPU %u, event %u\n", - policy->cpu, event); + policy->cpu, event); + + mutex_lock(&cpufreq_governor_lock); + if ((policy->governor_enabled && event == CPUFREQ_GOV_START) + || (!policy->governor_enabled + && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) { + mutex_unlock(&cpufreq_governor_lock); + return -EBUSY; + } + + if (event == CPUFREQ_GOV_STOP) + policy->governor_enabled = false; + else if (event == CPUFREQ_GOV_START) + policy->governor_enabled = true; + + mutex_unlock(&cpufreq_governor_lock); + ret = policy->governor->governor(policy, event); if (!ret) { @@ -1569,19 +2041,23 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, policy->governor->initialized++; else if (event == CPUFREQ_GOV_POLICY_EXIT) policy->governor->initialized--; + } else { + /* Restore original values */ + mutex_lock(&cpufreq_governor_lock); + if (event == CPUFREQ_GOV_STOP) + policy->governor_enabled = true; + else if (event == CPUFREQ_GOV_START) + policy->governor_enabled = false; + mutex_unlock(&cpufreq_governor_lock); } - /* we keep one module reference alive for - each CPU governed by this CPU */ - if ((event != CPUFREQ_GOV_START) || ret) - module_put(policy->governor->owner); - if ((event == CPUFREQ_GOV_STOP) && !ret) + if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) || + ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret)) module_put(policy->governor->owner); return ret; } - int cpufreq_register_governor(struct cpufreq_governor *governor) { int err; @@ -1606,12 +2082,9 @@ int cpufreq_register_governor(struct cpufreq_governor *governor) } EXPORT_SYMBOL_GPL(cpufreq_register_governor); - void cpufreq_unregister_governor(struct cpufreq_governor *governor) { -#ifdef CONFIG_HOTPLUG_CPU int cpu; -#endif if (!governor) return; @@ -1619,14 +2092,12 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor) if (cpufreq_disabled()) return; -#ifdef CONFIG_HOTPLUG_CPU for_each_present_cpu(cpu) { if (cpu_online(cpu)) continue; if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name)) strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0"); } -#endif mutex_lock(&cpufreq_governor_mutex); list_del(&governor->governor_list); @@ -1636,7 +2107,6 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor) EXPORT_SYMBOL_GPL(cpufreq_unregister_governor); - /********************************************************************* * POLICY INTERFACE * *********************************************************************/ @@ -1658,118 +2128,107 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) if (!cpu_policy) return -EINVAL; - memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy)); + memcpy(policy, cpu_policy, sizeof(*policy)); cpufreq_cpu_put(cpu_policy); return 0; } EXPORT_SYMBOL(cpufreq_get_policy); - /* - * data : current policy. - * policy : policy to be set. + * policy : current policy. + * new_policy: policy to be set. 
*/ -static int __cpufreq_set_policy(struct cpufreq_policy *data, - struct cpufreq_policy *policy) +static int cpufreq_set_policy(struct cpufreq_policy *policy, + struct cpufreq_policy *new_policy) { - int ret = 0, failed = 1; + struct cpufreq_governor *old_gov; + int ret; - pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu, - policy->min, policy->max); + pr_debug("setting new policy for CPU %u: %u - %u kHz\n", + new_policy->cpu, new_policy->min, new_policy->max); - memcpy(&policy->cpuinfo, &data->cpuinfo, - sizeof(struct cpufreq_cpuinfo)); + memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo)); - if (policy->min > data->max || policy->max < data->min) { - ret = -EINVAL; - goto error_out; - } + if (new_policy->min > policy->max || new_policy->max < policy->min) + return -EINVAL; /* verify the cpu speed can be set within this limit */ - ret = cpufreq_driver->verify(policy); + ret = cpufreq_driver->verify(new_policy); if (ret) - goto error_out; + return ret; /* adjust if necessary - all reasons */ blocking_notifier_call_chain(&cpufreq_policy_notifier_list, - CPUFREQ_ADJUST, policy); + CPUFREQ_ADJUST, new_policy); /* adjust if necessary - hardware incompatibility*/ blocking_notifier_call_chain(&cpufreq_policy_notifier_list, - CPUFREQ_INCOMPATIBLE, policy); + CPUFREQ_INCOMPATIBLE, new_policy); - /* verify the cpu speed can be set within this limit, - which might be different to the first one */ - ret = cpufreq_driver->verify(policy); + /* + * verify the cpu speed can be set within this limit, which might be + * different to the first one + */ + ret = cpufreq_driver->verify(new_policy); if (ret) - goto error_out; + return ret; /* notification of the new policy */ blocking_notifier_call_chain(&cpufreq_policy_notifier_list, - CPUFREQ_NOTIFY, policy); + CPUFREQ_NOTIFY, new_policy); - data->min = policy->min; - data->max = policy->max; + policy->min = new_policy->min; + policy->max = new_policy->max; pr_debug("new min and max freqs are %u - %u kHz\n", - data->min, data->max); + policy->min, policy->max); if (cpufreq_driver->setpolicy) { - data->policy = policy->policy; + policy->policy = new_policy->policy; pr_debug("setting range\n"); - ret = cpufreq_driver->setpolicy(policy); - } else { - if (policy->governor != data->governor) { - /* save old, working values */ - struct cpufreq_governor *old_gov = data->governor; - - pr_debug("governor switch\n"); - - /* end old governor */ - if (data->governor) { - __cpufreq_governor(data, CPUFREQ_GOV_STOP); - unlock_policy_rwsem_write(policy->cpu); - __cpufreq_governor(data, - CPUFREQ_GOV_POLICY_EXIT); - lock_policy_rwsem_write(policy->cpu); - } + return cpufreq_driver->setpolicy(new_policy); + } - /* start new governor */ - data->governor = policy->governor; - if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) { - if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) { - failed = 0; - } else { - unlock_policy_rwsem_write(policy->cpu); - __cpufreq_governor(data, - CPUFREQ_GOV_POLICY_EXIT); - lock_policy_rwsem_write(policy->cpu); - } - } + if (new_policy->governor == policy->governor) + goto out; - if (failed) { - /* new governor failed, so re-start old one */ - pr_debug("starting governor %s failed\n", - data->governor->name); - if (old_gov) { - data->governor = old_gov; - __cpufreq_governor(data, - CPUFREQ_GOV_POLICY_INIT); - __cpufreq_governor(data, - CPUFREQ_GOV_START); - } - ret = -EINVAL; - goto error_out; - } - /* might be a policy change, too, so fall through */ - } - pr_debug("governor: change or update limits\n"); 
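/*
 * Condensed restatement of the governor-switch sequence implemented by
 * the rewritten cpufreq_set_policy() below; the rwsem drop/retake around
 * POLICY_EXIT/POLICY_INIT and the module refcounting are elided, and
 * switch_governor() is an invented name, not part of the patch. The
 * order is STOP -> POLICY_EXIT for the old governor, then
 * POLICY_INIT -> START for the new one, with a rollback to the old
 * governor if either step fails.
 */
static int switch_governor(struct cpufreq_policy *policy,
			   struct cpufreq_governor *new_gov)
{
	struct cpufreq_governor *old_gov = policy->governor;

	if (old_gov) {
		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
	}

	policy->governor = new_gov;
	if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
		if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
			return 0;
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
	}

	/* New governor failed: re-init and restart the old one. */
	policy->governor = old_gov;
	if (old_gov) {
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
		__cpufreq_governor(policy, CPUFREQ_GOV_START);
	}
	return -EINVAL;
}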
- __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); + pr_debug("governor switch\n"); + + /* save old, working values */ + old_gov = policy->governor; + /* end old governor */ + if (old_gov) { + __cpufreq_governor(policy, CPUFREQ_GOV_STOP); + up_write(&policy->rwsem); + __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); + down_write(&policy->rwsem); } -error_out: - return ret; + /* start new governor */ + policy->governor = new_policy->governor; + if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) { + if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) + goto out; + + up_write(&policy->rwsem); + __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); + down_write(&policy->rwsem); + } + + /* new governor failed, so re-start old one */ + pr_debug("starting governor %s failed\n", policy->governor->name); + if (old_gov) { + policy->governor = old_gov; + __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT); + __cpufreq_governor(policy, CPUFREQ_GOV_START); + } + + return -EINVAL; + + out: + pr_debug("governor: change or update limits\n"); + return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); } /** @@ -1781,53 +2240,54 @@ error_out: */ int cpufreq_update_policy(unsigned int cpu) { - struct cpufreq_policy *data = cpufreq_cpu_get(cpu); - struct cpufreq_policy policy; + struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); + struct cpufreq_policy new_policy; int ret; - if (!data) { - ret = -ENODEV; - goto no_policy; - } + if (!policy) + return -ENODEV; - if (unlikely(lock_policy_rwsem_write(cpu))) { - ret = -EINVAL; - goto fail; - } + down_write(&policy->rwsem); pr_debug("updating policy for CPU %u\n", cpu); - memcpy(&policy, data, sizeof(struct cpufreq_policy)); - policy.min = data->user_policy.min; - policy.max = data->user_policy.max; - policy.policy = data->user_policy.policy; - policy.governor = data->user_policy.governor; - - /* BIOS might change freq behind our back - -> ask driver for current freq and notify governors about a change */ - if (cpufreq_driver->get) { - policy.cur = cpufreq_driver->get(cpu); - if (!data->cur) { - pr_debug("Driver did not initialize current freq"); - data->cur = policy.cur; + memcpy(&new_policy, policy, sizeof(*policy)); + new_policy.min = policy->user_policy.min; + new_policy.max = policy->user_policy.max; + new_policy.policy = policy->user_policy.policy; + new_policy.governor = policy->user_policy.governor; + + /* + * BIOS might change freq behind our back + * -> ask driver for current freq and notify governors about a change + */ + if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { + new_policy.cur = cpufreq_driver->get(cpu); + if (WARN_ON(!new_policy.cur)) { + ret = -EIO; + goto unlock; + } + + if (!policy->cur) { + pr_debug("Driver did not initialize current freq\n"); + policy->cur = new_policy.cur; } else { - if (data->cur != policy.cur && cpufreq_driver->target) - cpufreq_out_of_sync(cpu, data->cur, - policy.cur); + if (policy->cur != new_policy.cur && has_target()) + cpufreq_out_of_sync(cpu, policy->cur, + new_policy.cur); } } - ret = __cpufreq_set_policy(data, &policy); + ret = cpufreq_set_policy(policy, &new_policy); - unlock_policy_rwsem_write(cpu); +unlock: + up_write(&policy->rwsem); -fail: - cpufreq_cpu_put(data); -no_policy: + cpufreq_cpu_put(policy); return ret; } EXPORT_SYMBOL(cpufreq_update_policy); -static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb, +static int cpufreq_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; @@ -1835,16 +2295,21 @@ static 
int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb, dev = get_cpu_device(cpu); if (dev) { - switch (action) { + switch (action & ~CPU_TASKS_FROZEN) { case CPU_ONLINE: - cpufreq_add_dev(dev, NULL); + __cpufreq_add_dev(dev, NULL); break; + case CPU_DOWN_PREPARE: - case CPU_UP_CANCELED_FROZEN: - __cpufreq_remove_dev(dev, NULL); + __cpufreq_remove_dev_prepare(dev, NULL); + break; + + case CPU_POST_DEAD: + __cpufreq_remove_dev_finish(dev, NULL); break; + case CPU_DOWN_FAILED: - cpufreq_add_dev(dev, NULL); + __cpufreq_add_dev(dev, NULL); break; } } @@ -1852,10 +2317,77 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb, } static struct notifier_block __refdata cpufreq_cpu_notifier = { - .notifier_call = cpufreq_cpu_callback, + .notifier_call = cpufreq_cpu_callback, }; /********************************************************************* + * BOOST * + *********************************************************************/ +static int cpufreq_boost_set_sw(int state) +{ + struct cpufreq_frequency_table *freq_table; + struct cpufreq_policy *policy; + int ret = -EINVAL; + + list_for_each_entry(policy, &cpufreq_policy_list, policy_list) { + freq_table = cpufreq_frequency_get_table(policy->cpu); + if (freq_table) { + ret = cpufreq_frequency_table_cpuinfo(policy, + freq_table); + if (ret) { + pr_err("%s: Policy frequency update failed\n", + __func__); + break; + } + policy->user_policy.max = policy->max; + __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); + } + } + + return ret; +} + +int cpufreq_boost_trigger_state(int state) +{ + unsigned long flags; + int ret = 0; + + if (cpufreq_driver->boost_enabled == state) + return 0; + + write_lock_irqsave(&cpufreq_driver_lock, flags); + cpufreq_driver->boost_enabled = state; + write_unlock_irqrestore(&cpufreq_driver_lock, flags); + + ret = cpufreq_driver->set_boost(state); + if (ret) { + write_lock_irqsave(&cpufreq_driver_lock, flags); + cpufreq_driver->boost_enabled = !state; + write_unlock_irqrestore(&cpufreq_driver_lock, flags); + + pr_err("%s: Cannot %s BOOST\n", + __func__, state ? "enable" : "disable"); + } + + return ret; +} + +int cpufreq_boost_supported(void) +{ + if (likely(cpufreq_driver)) + return cpufreq_driver->boost_supported; + + return 0; +} +EXPORT_SYMBOL_GPL(cpufreq_boost_supported); + +int cpufreq_boost_enabled(void) +{ + return cpufreq_driver->boost_enabled; +} +EXPORT_SYMBOL_GPL(cpufreq_boost_enabled); + +/********************************************************************* * REGISTER / UNREGISTER CPUFREQ DRIVER * *********************************************************************/ @@ -1864,7 +2396,7 @@ static struct notifier_block __refdata cpufreq_cpu_notifier = { * @driver_data: A struct cpufreq_driver containing the values# * submitted by the CPU Frequency driver. * - * Registers a CPU Frequency driver to this core code. This code + * Registers a CPU Frequency driver to this core code. This code * returns zero on success, -EBUSY when another driver got here first * (and isn't unregistered in the meantime). 
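/*
 * Illustrative sketch, not part of this series: how a driver advertises
 * boost support using the fields handled by the BOOST section above.
 * The foo_* callbacks are invented. With ->set_boost left NULL,
 * registration falls back to cpufreq_boost_set_sw(), which re-evaluates
 * each policy's limits with the boost frequencies included.
 */
static struct cpufreq_driver foo_boost_driver = {
	.name		 = "foo-boost",
	.flags		 = CPUFREQ_STICKY,
	.verify		 = cpufreq_generic_frequency_table_verify,
	.target_index	 = foo_target_index,
	.init		 = foo_cpufreq_init,
	.boost_supported = true,	/* exposes the global "boost" sysfs file */
};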
* @@ -1878,7 +2410,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) return -ENODEV; if (!driver_data || !driver_data->verify || !driver_data->init || - ((!driver_data->setpolicy) && (!driver_data->target))) + !(driver_data->setpolicy || driver_data->target_index || + driver_data->target) || + (driver_data->setpolicy && (driver_data->target_index || + driver_data->target)) || + (!!driver_data->get_intermediate != !!driver_data->target_intermediate)) return -EINVAL; pr_debug("trying to register driver %s\n", driver_data->name); @@ -1889,14 +2425,30 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) write_lock_irqsave(&cpufreq_driver_lock, flags); if (cpufreq_driver) { write_unlock_irqrestore(&cpufreq_driver_lock, flags); - return -EBUSY; + return -EEXIST; } cpufreq_driver = driver_data; write_unlock_irqrestore(&cpufreq_driver_lock, flags); + if (cpufreq_boost_supported()) { + /* + * Check if driver provides function to enable boost - + * if not, use cpufreq_boost_set_sw as default + */ + if (!cpufreq_driver->set_boost) + cpufreq_driver->set_boost = cpufreq_boost_set_sw; + + ret = cpufreq_sysfs_create_file(&boost.attr); + if (ret) { + pr_err("%s: cannot register global BOOST sysfs file\n", + __func__); + goto err_null_driver; + } + } + ret = subsys_interface_register(&cpufreq_interface); if (ret) - goto err_null_driver; + goto err_boost_unreg; if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) { int i; @@ -1912,7 +2464,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) /* if all ->init() calls failed, unregister */ if (ret) { pr_debug("no CPU initialized for driver %s\n", - driver_data->name); + driver_data->name); goto err_if_unreg; } } @@ -1923,6 +2475,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) return 0; err_if_unreg: subsys_interface_unregister(&cpufreq_interface); +err_boost_unreg: + if (cpufreq_boost_supported()) + cpufreq_sysfs_remove_file(&boost.attr); err_null_driver: write_lock_irqsave(&cpufreq_driver_lock, flags); cpufreq_driver = NULL; @@ -1931,11 +2486,10 @@ err_null_driver: } EXPORT_SYMBOL_GPL(cpufreq_register_driver); - /** * cpufreq_unregister_driver - unregister the current CPUFreq driver * - * Unregister the current CPUFreq driver. Only call this if you have + * Unregister the current CPUFreq driver. Only call this if you have * the right to do so, i.e. if you have succeeded in initialising before! * Returns zero if successful, and -EINVAL if the cpufreq_driver is * currently not initialised. 
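/*
 * The callback validation at the top of cpufreq_register_driver() above,
 * restated as a predicate for clarity (cpufreq_driver_ops_valid() is an
 * invented name, not part of the patch): a driver needs ->verify and
 * ->init, exactly one scaling interface (either ->setpolicy, or the
 * ->target/->target_index family), and the two intermediate-frequency
 * hooks either both set or both clear.
 */
static bool cpufreq_driver_ops_valid(struct cpufreq_driver *drv)
{
	bool has_setpolicy = drv->setpolicy != NULL;
	bool has_target = drv->target != NULL || drv->target_index != NULL;

	if (!drv->verify || !drv->init)
		return false;
	if (has_setpolicy == has_target)	/* need exactly one of the two */
		return false;
	return !!drv->get_intermediate == !!drv->target_intermediate;
}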
@@ -1950,11 +2504,18 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver) pr_debug("unregistering driver %s\n", driver->name); subsys_interface_unregister(&cpufreq_interface); + if (cpufreq_boost_supported()) + cpufreq_sysfs_remove_file(&boost.attr); + unregister_hotcpu_notifier(&cpufreq_cpu_notifier); + down_write(&cpufreq_rwsem); write_lock_irqsave(&cpufreq_driver_lock, flags); + cpufreq_driver = NULL; + write_unlock_irqrestore(&cpufreq_driver_lock, flags); + up_write(&cpufreq_rwsem); return 0; } @@ -1962,19 +2523,11 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); static int __init cpufreq_core_init(void) { - int cpu; - if (cpufreq_disabled()) return -ENODEV; - for_each_possible_cpu(cpu) { - per_cpu(cpufreq_policy_cpu, cpu) = -1; - init_rwsem(&per_cpu(cpu_policy_rwsem, cpu)); - } - - cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj); + cpufreq_global_kobject = kobject_create(); BUG_ON(!cpufreq_global_kobject); - register_syscore_ops(&cpufreq_syscore_ops); return 0; } diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 0ceb2eff5a7..25a70d06c5b 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -11,19 +11,7 @@ * published by the Free Software Foundation. */ -#include <linux/cpufreq.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/kernel_stat.h> -#include <linux/kobject.h> -#include <linux/module.h> -#include <linux/mutex.h> -#include <linux/notifier.h> -#include <linux/percpu-defs.h> #include <linux/slab.h> -#include <linux/sysfs.h> -#include <linux/types.h> - #include "cpufreq_governor.h" /* Conservative governor macros */ @@ -79,6 +67,7 @@ static void cs_check_cpu(int cpu, unsigned int load) return; dbs_info->requested_freq += get_freq_target(cs_tuners, policy); + if (dbs_info->requested_freq > policy->max) dbs_info->requested_freq = policy->max; @@ -94,14 +83,17 @@ static void cs_check_cpu(int cpu, unsigned int load) /* Check for frequency decrease */ if (load < cs_tuners->down_threshold) { + unsigned int freq_target; /* * if we cannot reduce the frequency anymore, break out early */ if (policy->cur == policy->min) return; - dbs_info->requested_freq -= get_freq_target(cs_tuners, policy); - if (dbs_info->requested_freq < policy->min) + freq_target = get_freq_target(cs_tuners, policy); + if (dbs_info->requested_freq > freq_target) + dbs_info->requested_freq -= freq_target; + else dbs_info->requested_freq = policy->min; __cpufreq_driver_target(policy, dbs_info->requested_freq, @@ -221,8 +213,8 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf, return count; } -static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, - size_t count) +static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data, + const char *buf, size_t count) { struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; unsigned int input, j; @@ -235,10 +227,10 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, if (input > 1) input = 1; - if (input == cs_tuners->ignore_nice) /* nothing to do */ + if (input == cs_tuners->ignore_nice_load) /* nothing to do */ return count; - cs_tuners->ignore_nice = input; + cs_tuners->ignore_nice_load = input; /* we need to re-evaluate prev_cpu_idle */ for_each_online_cpu(j) { @@ -246,7 +238,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, dbs_info = &per_cpu(cs_cpu_dbs_info, j); dbs_info->cdbs.prev_cpu_idle = 
get_cpu_idle_time(j, &dbs_info->cdbs.prev_cpu_wall, 0); - if (cs_tuners->ignore_nice) + if (cs_tuners->ignore_nice_load) dbs_info->cdbs.prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; } @@ -279,7 +271,7 @@ show_store_one(cs, sampling_rate); show_store_one(cs, sampling_down_factor); show_store_one(cs, up_threshold); show_store_one(cs, down_threshold); -show_store_one(cs, ignore_nice); +show_store_one(cs, ignore_nice_load); show_store_one(cs, freq_step); declare_show_sampling_rate_min(cs); @@ -287,7 +279,7 @@ gov_sys_pol_attr_rw(sampling_rate); gov_sys_pol_attr_rw(sampling_down_factor); gov_sys_pol_attr_rw(up_threshold); gov_sys_pol_attr_rw(down_threshold); -gov_sys_pol_attr_rw(ignore_nice); +gov_sys_pol_attr_rw(ignore_nice_load); gov_sys_pol_attr_rw(freq_step); gov_sys_pol_attr_ro(sampling_rate_min); @@ -297,7 +289,7 @@ static struct attribute *dbs_attributes_gov_sys[] = { &sampling_down_factor_gov_sys.attr, &up_threshold_gov_sys.attr, &down_threshold_gov_sys.attr, - &ignore_nice_gov_sys.attr, + &ignore_nice_load_gov_sys.attr, &freq_step_gov_sys.attr, NULL }; @@ -313,7 +305,7 @@ static struct attribute *dbs_attributes_gov_pol[] = { &sampling_down_factor_gov_pol.attr, &up_threshold_gov_pol.attr, &down_threshold_gov_pol.attr, - &ignore_nice_gov_pol.attr, + &ignore_nice_load_gov_pol.attr, &freq_step_gov_pol.attr, NULL }; @@ -329,7 +321,7 @@ static int cs_init(struct dbs_data *dbs_data) { struct cs_dbs_tuners *tuners; - tuners = kzalloc(sizeof(struct cs_dbs_tuners), GFP_KERNEL); + tuners = kzalloc(sizeof(*tuners), GFP_KERNEL); if (!tuners) { pr_err("%s: kzalloc failed\n", __func__); return -ENOMEM; @@ -338,7 +330,7 @@ static int cs_init(struct dbs_data *dbs_data) tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD; tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD; tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; - tuners->ignore_nice = 0; + tuners->ignore_nice_load = 0; tuners->freq_step = DEF_FREQUENCY_STEP; dbs_data->tuners = tuners; diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index dc9b72e25c1..1b44496b2d2 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -16,28 +16,12 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <asm/cputime.h> -#include <linux/cpufreq.h> -#include <linux/cpumask.h> #include <linux/export.h> #include <linux/kernel_stat.h> -#include <linux/mutex.h> #include <linux/slab.h> -#include <linux/tick.h> -#include <linux/types.h> -#include <linux/workqueue.h> -#include <linux/cpu.h> #include "cpufreq_governor.h" -static struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy) -{ - if (have_governor_per_policy()) - return &policy->kobj; - else - return cpufreq_global_kobject; -} - static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data) { if (have_governor_per_policy()) @@ -46,59 +30,39 @@ static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data) return dbs_data->cdata->attr_group_gov_sys; } -static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) -{ - u64 idle_time; - u64 cur_wall_time; - u64 busy_time; - - cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); - - busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; - - 
idle_time = cur_wall_time - busy_time; - if (wall) - *wall = cputime_to_usecs(cur_wall_time); - - return cputime_to_usecs(idle_time); -} - -u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy) -{ - u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL); - - if (idle_time == -1ULL) - return get_cpu_idle_time_jiffy(cpu, wall); - else if (!io_busy) - idle_time += get_cpu_iowait_time_us(cpu, wall); - - return idle_time; -} -EXPORT_SYMBOL_GPL(get_cpu_idle_time); - void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) { struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu); struct od_dbs_tuners *od_tuners = dbs_data->tuners; struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; struct cpufreq_policy *policy; + unsigned int sampling_rate; unsigned int max_load = 0; unsigned int ignore_nice; unsigned int j; - if (dbs_data->cdata->governor == GOV_ONDEMAND) - ignore_nice = od_tuners->ignore_nice; - else - ignore_nice = cs_tuners->ignore_nice; + if (dbs_data->cdata->governor == GOV_ONDEMAND) { + struct od_cpu_dbs_info_s *od_dbs_info = + dbs_data->cdata->get_cpu_dbs_info_s(cpu); + + /* + * Sometimes, the ondemand governor uses an additional + * multiplier to give long delays. So apply this multiplier to + * the 'sampling_rate', so as to keep the wake-up-from-idle + * detection logic a bit conservative. + */ + sampling_rate = od_tuners->sampling_rate; + sampling_rate *= od_dbs_info->rate_mult; + + ignore_nice = od_tuners->ignore_nice_load; + } else { + sampling_rate = cs_tuners->sampling_rate; + ignore_nice = cs_tuners->ignore_nice_load; + } policy = cdbs->cur_policy; - /* Get Absolute Load (in terms of freq for ondemand gov) */ + /* Get Absolute Load */ for_each_cpu(j, policy->cpus) { struct cpu_dbs_common_info *j_cdbs; u64 cur_wall_time, cur_idle_time; @@ -147,14 +111,45 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) if (unlikely(!wall_time || wall_time < idle_time)) continue; - load = 100 * (wall_time - idle_time) / wall_time; - - if (dbs_data->cdata->governor == GOV_ONDEMAND) { - int freq_avg = __cpufreq_driver_getavg(policy, j); - if (freq_avg <= 0) - freq_avg = policy->cur; + /* + * If the CPU had gone completely idle, and a task just woke up + * on this CPU now, it would be unfair to calculate 'load' the + * usual way for this elapsed time-window, because it will show + * near-zero load, irrespective of how CPU intensive that task + * actually is. This is undesirable for latency-sensitive bursty + * workloads. + * + * To avoid this, we reuse the 'load' from the previous + * time-window and give this task a chance to start with a + * reasonably high CPU frequency. (However, we shouldn't over-do + * this copy, lest we get stuck at a high load (high frequency) + * for too long, even when the current system load has actually + * dropped down. So we perform the copy only once, upon the + * first wake-up from idle.) + * + * Detecting this situation is easy: the governor's deferrable + * timer would not have fired during CPU-idle periods. Hence + * an unusually large 'wall_time' (as compared to the sampling + * rate) indicates this scenario. + * + * prev_load can be zero in two cases and we must recalculate it + * for both cases: + * - during long idle intervals + * - explicitly set to zero + */ + if (unlikely(wall_time > (2 * sampling_rate) && + j_cdbs->prev_load)) { + load = j_cdbs->prev_load; - load *= freq_avg; + /* + * Perform a destructive copy, to ensure that we copy + * the previous load only once, upon the first wake-up + * from idle. 
+ */ + j_cdbs->prev_load = 0; + } else { + load = 100 * (wall_time - idle_time) / wall_time; + j_cdbs->prev_load = load; } if (load > max_load) @@ -178,14 +173,26 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy, { int i; + mutex_lock(&cpufreq_governor_lock); + if (!policy->governor_enabled) + goto out_unlock; + if (!all_cpus) { - __gov_queue_work(smp_processor_id(), dbs_data, delay); + /* + * Use raw_smp_processor_id() to avoid preemptible warnings. + * We know that this is only called with all_cpus == false from + * works that have been queued with *_work_on() functions and + * those works are canceled during CPU_DOWN_PREPARE so they + * can't possibly run on any other CPU. + */ + __gov_queue_work(raw_smp_processor_id(), dbs_data, delay); } else { - get_online_cpus(); for_each_cpu(i, policy->cpus) __gov_queue_work(i, dbs_data, delay); - put_online_cpus(); } + +out_unlock: + mutex_unlock(&cpufreq_governor_lock); } EXPORT_SYMBOL_GPL(gov_queue_work); @@ -278,6 +285,9 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, return rc; } + if (!have_governor_per_policy()) + WARN_ON(cpufreq_get_global_kobject()); + rc = sysfs_create_group(get_governor_parent_kobj(policy), get_sysfs_attr(dbs_data)); if (rc) { @@ -288,7 +298,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, policy->governor_data = dbs_data; - /* policy latency is in nS. Convert it to uS first */ + /* policy latency is in ns. Convert it to us first */ latency = policy->cpuinfo.transition_latency / 1000; if (latency == 0) latency = 1; @@ -316,6 +326,9 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, sysfs_remove_group(get_governor_parent_kobj(policy), get_sysfs_attr(dbs_data)); + if (!have_governor_per_policy()) + cpufreq_put_global_kobject(); + if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) && (policy->governor->initialized == 1)) { struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; @@ -339,12 +352,12 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, cs_tuners = dbs_data->tuners; cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); sampling_rate = cs_tuners->sampling_rate; - ignore_nice = cs_tuners->ignore_nice; + ignore_nice = cs_tuners->ignore_nice_load; } else { od_tuners = dbs_data->tuners; od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); sampling_rate = od_tuners->sampling_rate; - ignore_nice = od_tuners->ignore_nice; + ignore_nice = od_tuners->ignore_nice_load; od_ops = dbs_data->cdata->gov_ops; io_busy = od_tuners->io_is_busy; } @@ -359,11 +372,18 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, for_each_cpu(j, policy->cpus) { struct cpu_dbs_common_info *j_cdbs = dbs_data->cdata->get_cpu_cdbs(j); + unsigned int prev_load; j_cdbs->cpu = j; j_cdbs->cur_policy = policy; j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy); + + prev_load = (unsigned int) + (j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle); + j_cdbs->prev_load = 100 * prev_load / + (unsigned int) j_cdbs->prev_cpu_wall; + if (ignore_nice) j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; @@ -373,10 +393,6 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, dbs_data->cdata->gov_dbs_timer); } - /* - * conservative does not implement micro like ondemand - * governor, thus we are bound to jiffes/HZ - */ if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { cs_dbs_info->down_skip = 0; cs_dbs_info->enable = 1; @@ -404,12 +420,18 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, mutex_lock(&dbs_data->mutex); 
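/*
 * Plain-C restatement of the copy-once idle heuristic added to
 * dbs_check_cpu() above (effective_load() is an invented helper, not
 * part of the patch). A wall_time much larger than the sampling rate
 * means the governor's deferrable timer slept through an idle period,
 * so the previous load is reused exactly once instead of reporting a
 * near-zero load for the freshly woken CPU.
 */
static unsigned int effective_load(unsigned int wall_time,
				   unsigned int idle_time,
				   unsigned int sampling_rate,
				   unsigned int *prev_load)
{
	unsigned int load;

	if (wall_time > 2 * sampling_rate && *prev_load) {
		/* First sample after a long idle: reuse the old load once. */
		load = *prev_load;
		*prev_load = 0;
	} else {
		load = 100 * (wall_time - idle_time) / wall_time;
		*prev_load = load;
	}
	return load;
}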
mutex_destroy(&cpu_cdbs->timer_mutex); + cpu_cdbs->cur_policy = NULL; mutex_unlock(&dbs_data->mutex); break; case CPUFREQ_GOV_LIMITS: + mutex_lock(&dbs_data->mutex); + if (!cpu_cdbs->cur_policy) { + mutex_unlock(&dbs_data->mutex); + break; + } mutex_lock(&cpu_cdbs->timer_mutex); if (policy->max < cpu_cdbs->cur_policy->cur) __cpufreq_driver_target(cpu_cdbs->cur_policy, @@ -419,6 +441,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, policy->min, CPUFREQ_RELATION_L); dbs_check_cpu(dbs_data, cpu); mutex_unlock(&cpu_cdbs->timer_mutex); + mutex_unlock(&dbs_data->mutex); break; } return 0; diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index e16a96130cb..cc401d147e7 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h @@ -18,19 +18,18 @@ #define _CPUFREQ_GOVERNOR_H #include <linux/cpufreq.h> -#include <linux/kobject.h> +#include <linux/kernel_stat.h> +#include <linux/module.h> #include <linux/mutex.h> -#include <linux/workqueue.h> -#include <linux/sysfs.h> /* * The polling frequency depends on the capability of the processor. Default * polling frequency is 1000 times the transition latency of the processor. The - * governor will work on any processor with transition latency <= 10mS, using + * governor will work on any processor with transition latency <= 10ms, using * appropriate sampling rate. * - * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) - * this governor will not work. All times here are in uS. + * For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL) + * this governor will not work. All times here are in us (micro seconds). */ #define MIN_SAMPLING_RATE_RATIO (2) #define LATENCY_MULTIPLIER (1000) @@ -81,7 +80,7 @@ static ssize_t show_##file_name##_gov_sys \ return sprintf(buf, "%u\n", tuners->file_name); \ } \ \ -static ssize_t show_##file_name##_gov_pol \ +static ssize_t show_##file_name##_gov_pol \ (struct cpufreq_policy *policy, char *buf) \ { \ struct dbs_data *dbs_data = policy->governor_data; \ @@ -91,7 +90,7 @@ static ssize_t show_##file_name##_gov_pol \ #define store_one(_gov, file_name) \ static ssize_t store_##file_name##_gov_sys \ -(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) \ +(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) \ { \ struct dbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data; \ return store_##file_name(dbs_data, buf, count); \ @@ -135,6 +134,13 @@ struct cpu_dbs_common_info { u64 prev_cpu_idle; u64 prev_cpu_wall; u64 prev_cpu_nice; + /* + * Used to keep track of load in the previous interval. However, when + * explicitly set to zero, it is used as a flag to ensure that we copy + * the previous load to the current interval only once, upon the first + * wake-up from idle. 
+ */ + unsigned int prev_load; struct cpufreq_policy *cur_policy; struct delayed_work work; /* @@ -163,19 +169,18 @@ struct cs_cpu_dbs_info_s { unsigned int enable:1; }; -/* Per policy Governers sysfs tunables */ +/* Per policy Governors sysfs tunables */ struct od_dbs_tuners { - unsigned int ignore_nice; + unsigned int ignore_nice_load; unsigned int sampling_rate; unsigned int sampling_down_factor; unsigned int up_threshold; - unsigned int adj_up_threshold; unsigned int powersave_bias; unsigned int io_is_busy; }; struct cs_dbs_tuners { - unsigned int ignore_nice; + unsigned int ignore_nice_load; unsigned int sampling_rate; unsigned int sampling_down_factor; unsigned int up_threshold; @@ -183,7 +188,7 @@ struct cs_dbs_tuners { unsigned int freq_step; }; -/* Common Governer data across policies */ +/* Common Governor data across policies */ struct dbs_data; struct common_dbs_data { /* Common across governors */ @@ -193,7 +198,10 @@ struct common_dbs_data { struct attribute_group *attr_group_gov_sys; /* one governor - system */ struct attribute_group *attr_group_gov_pol; /* one governor - policy */ - /* Common data for platforms that don't set have_governor_per_policy */ + /* + * Common data for platforms that don't set + * CPUFREQ_HAVE_GOVERNOR_PER_POLICY + */ struct dbs_data *gdbs_data; struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu); @@ -207,7 +215,7 @@ struct common_dbs_data { void *gov_ops; }; -/* Governer Per policy data */ +/* Governor Per policy data */ struct dbs_data { struct common_dbs_data *cdata; unsigned int min_sampling_rate; @@ -223,7 +231,7 @@ struct od_ops { void (*powersave_bias_init_cpu)(int cpu); unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy, unsigned int freq_next, unsigned int relation); - void (*freq_increase)(struct cpufreq_policy *p, unsigned int freq); + void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq); }; struct cs_ops { @@ -256,7 +264,8 @@ static ssize_t show_sampling_rate_min_gov_pol \ return sprintf(buf, "%u\n", dbs_data->min_sampling_rate); \ } -u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy); +extern struct mutex cpufreq_governor_lock; + void dbs_check_cpu(struct dbs_data *dbs_data, int cpu); bool need_load_eval(struct cpu_dbs_common_info *cdbs, unsigned int sampling_rate); diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 4b9bb5def6f..18d40918909 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -12,28 +12,16 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/cpufreq.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/kernel_stat.h> -#include <linux/kobject.h> -#include <linux/module.h> -#include <linux/mutex.h> +#include <linux/cpu.h> #include <linux/percpu-defs.h> #include <linux/slab.h> -#include <linux/sysfs.h> #include <linux/tick.h> -#include <linux/types.h> -#include <linux/cpu.h> - #include "cpufreq_governor.h" /* On-demand governor macros */ -#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) #define DEF_FREQUENCY_UP_THRESHOLD (80) #define DEF_SAMPLING_DOWN_FACTOR (1) #define MAX_SAMPLING_DOWN_FACTOR (100000) -#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3) #define MICRO_FREQUENCY_UP_THRESHOLD (95) #define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) #define MIN_FREQUENCY_UP_THRESHOLD (11) @@ -47,6 +35,8 @@ static struct od_ops od_ops; static struct cpufreq_governor cpufreq_gov_ondemand; #endif +static unsigned int default_powersave_bias; + static void 
ondemand_powersave_bias_init_cpu(int cpu) { struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); @@ -142,31 +132,27 @@ static void ondemand_powersave_bias_init(void) } } -static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) +static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq) { - struct dbs_data *dbs_data = p->governor_data; + struct dbs_data *dbs_data = policy->governor_data; struct od_dbs_tuners *od_tuners = dbs_data->tuners; if (od_tuners->powersave_bias) - freq = od_ops.powersave_bias_target(p, freq, + freq = od_ops.powersave_bias_target(policy, freq, CPUFREQ_RELATION_H); - else if (p->cur == p->max) + else if (policy->cur == policy->max) return; - __cpufreq_driver_target(p, freq, od_tuners->powersave_bias ? + __cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ? CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); } /* * Every sampling_rate, we check, if current idle time is less than 20% - * (default), then we try to increase frequency. Every sampling_rate, we look - * for the lowest frequency which can sustain the load while keeping idle time - * over 30%. If such a frequency exist, we try to decrease to this frequency. - * - * Any frequency increase takes it to the maximum frequency. Frequency reduction - * happens at minimum steps of 5% (default) of current frequency + * (default), then we try to increase frequency. Else, we adjust the frequency + * proportional to load. */ -static void od_check_cpu(int cpu, unsigned int load_freq) +static void od_check_cpu(int cpu, unsigned int load) { struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; @@ -176,36 +162,20 @@ static void od_check_cpu(int cpu, unsigned int load_freq) dbs_info->freq_lo = 0; /* Check for frequency increase */ - if (load_freq > od_tuners->up_threshold * policy->cur) { + if (load > od_tuners->up_threshold) { /* If switching to max speed, apply sampling_down_factor */ if (policy->cur < policy->max) dbs_info->rate_mult = od_tuners->sampling_down_factor; dbs_freq_increase(policy, policy->max); - return; - } - - /* Check for frequency decrease */ - /* if we cannot reduce the frequency anymore, break out early */ - if (policy->cur == policy->min) - return; - - /* - * The optimal frequency is the frequency that is the lowest that can - * support the current CPU usage without triggering the up policy. To be - * safe, we focus 10 points under the threshold. 
- */ - if (load_freq < od_tuners->adj_up_threshold - * policy->cur) { + } else { + /* Calculate the next frequency proportional to load */ unsigned int freq_next; - freq_next = load_freq / od_tuners->adj_up_threshold; + freq_next = load * policy->cpuinfo.max_freq / 100; /* No longer fully busy, reset rate_mult */ dbs_info->rate_mult = 1; - if (freq_next < policy->min) - freq_next = policy->min; - if (!od_tuners->powersave_bias) { __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L); @@ -372,9 +342,6 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf, input < MIN_FREQUENCY_UP_THRESHOLD) { return -EINVAL; } - /* Calculate the new adj_up_threshold */ - od_tuners->adj_up_threshold += input; - od_tuners->adj_up_threshold -= od_tuners->up_threshold; od_tuners->up_threshold = input; return count; @@ -401,8 +368,8 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data, return count; } -static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, - size_t count) +static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data, + const char *buf, size_t count) { struct od_dbs_tuners *od_tuners = dbs_data->tuners; unsigned int input; @@ -417,10 +384,10 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, if (input > 1) input = 1; - if (input == od_tuners->ignore_nice) { /* nothing to do */ + if (input == od_tuners->ignore_nice_load) { /* nothing to do */ return count; } - od_tuners->ignore_nice = input; + od_tuners->ignore_nice_load = input; /* we need to re-evaluate prev_cpu_idle */ for_each_online_cpu(j) { @@ -428,7 +395,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, dbs_info = &per_cpu(od_cpu_dbs_info, j); dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy); - if (od_tuners->ignore_nice) + if (od_tuners->ignore_nice_load) dbs_info->cdbs.prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; @@ -459,7 +426,7 @@ show_store_one(od, sampling_rate); show_store_one(od, io_is_busy); show_store_one(od, up_threshold); show_store_one(od, sampling_down_factor); -show_store_one(od, ignore_nice); +show_store_one(od, ignore_nice_load); show_store_one(od, powersave_bias); declare_show_sampling_rate_min(od); @@ -467,7 +434,7 @@ gov_sys_pol_attr_rw(sampling_rate); gov_sys_pol_attr_rw(io_is_busy); gov_sys_pol_attr_rw(up_threshold); gov_sys_pol_attr_rw(sampling_down_factor); -gov_sys_pol_attr_rw(ignore_nice); +gov_sys_pol_attr_rw(ignore_nice_load); gov_sys_pol_attr_rw(powersave_bias); gov_sys_pol_attr_ro(sampling_rate_min); @@ -476,7 +443,7 @@ static struct attribute *dbs_attributes_gov_sys[] = { &sampling_rate_gov_sys.attr, &up_threshold_gov_sys.attr, &sampling_down_factor_gov_sys.attr, - &ignore_nice_gov_sys.attr, + &ignore_nice_load_gov_sys.attr, &powersave_bias_gov_sys.attr, &io_is_busy_gov_sys.attr, NULL @@ -492,7 +459,7 @@ static struct attribute *dbs_attributes_gov_pol[] = { &sampling_rate_gov_pol.attr, &up_threshold_gov_pol.attr, &sampling_down_factor_gov_pol.attr, - &ignore_nice_gov_pol.attr, + &ignore_nice_load_gov_pol.attr, &powersave_bias_gov_pol.attr, &io_is_busy_gov_pol.attr, NULL @@ -511,7 +478,7 @@ static int od_init(struct dbs_data *dbs_data) u64 idle_time; int cpu; - tuners = kzalloc(sizeof(struct od_dbs_tuners), GFP_KERNEL); + tuners = kzalloc(sizeof(*tuners), GFP_KERNEL); if (!tuners) { pr_err("%s: kzalloc failed\n", __func__); return -ENOMEM; @@ -523,8 +490,6 @@ static int od_init(struct dbs_data *dbs_data) if 
(idle_time != -1ULL) { /* Idle micro accounting is supported. Use finer thresholds */ tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; - tuners->adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD - - MICRO_FREQUENCY_DOWN_DIFFERENTIAL; /* * In nohz/micro accounting case we set the minimum frequency * not depending on HZ, but fixed (very low). The deferred @@ -533,8 +498,6 @@ static int od_init(struct dbs_data *dbs_data) dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; } else { tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD; - tuners->adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD - - DEF_FREQUENCY_DOWN_DIFFERENTIAL; /* For correct statistics, we need 10 ticks for each measure */ dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO * @@ -542,8 +505,8 @@ static int od_init(struct dbs_data *dbs_data) } tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; - tuners->ignore_nice = 0; - tuners->powersave_bias = 0; + tuners->ignore_nice_load = 0; + tuners->powersave_bias = default_powersave_bias; tuners->io_is_busy = should_io_be_busy(); dbs_data->tuners = tuners; @@ -585,6 +548,7 @@ static void od_set_powersave_bias(unsigned int powersave_bias) unsigned int cpu; cpumask_t done; + default_powersave_bias = powersave_bias; cpumask_clear(&done); get_online_cpus(); @@ -593,11 +557,17 @@ static void od_set_powersave_bias(unsigned int powersave_bias) continue; policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy; - dbs_data = policy->governor_data; - od_tuners = dbs_data->tuners; - od_tuners->powersave_bias = powersave_bias; + if (!policy) + continue; cpumask_or(&done, &done, policy->cpus); + + if (policy->governor != &cpufreq_gov_ondemand) + continue; + + dbs_data = policy->governor_data; + od_tuners = dbs_data->tuners; + od_tuners->powersave_bias = default_powersave_bias; } put_online_cpus(); } diff --git a/drivers/cpufreq/cpufreq_opp.c b/drivers/cpufreq/cpufreq_opp.c new file mode 100644 index 00000000000..c0c6f4a4ecc --- /dev/null +++ b/drivers/cpufreq/cpufreq_opp.c @@ -0,0 +1,110 @@ +/* + * Generic OPP helper interface for CPUFreq drivers + * + * Copyright (C) 2009-2014 Texas Instruments Incorporated. + * Nishanth Menon + * Romit Dasgupta + * Kevin Hilman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/cpufreq.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/export.h> +#include <linux/kernel.h> +#include <linux/pm_opp.h> +#include <linux/rcupdate.h> +#include <linux/slab.h> + +/** + * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device + * @dev: device for which we do this operation + * @table: Cpufreq table returned back to caller + * + * Generate a cpufreq table for a provided device- this assumes that the + * opp list is already initialized and ready for usage. + * + * This function allocates required memory for the cpufreq table. It is + * expected that the caller does the required maintenance such as freeing + * the table as required. + * + * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM + * if no memory available for the operation (table is not populated), returns 0 + * if successful and table is populated. + * + * WARNING: It is important for the callers to ensure refreshing their copy of + * the table if any of the mentioned functions have been invoked in the interim. 
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Since we just use the regular accessor functions to access the internal
+ * data structures, we use the RCU read lock inside this function. As a
+ * result, users of this function DO NOT need to take explicit locks when
+ * invoking it.
+ */
+int dev_pm_opp_init_cpufreq_table(struct device *dev,
+				  struct cpufreq_frequency_table **table)
+{
+	struct dev_pm_opp *opp;
+	struct cpufreq_frequency_table *freq_table = NULL;
+	int i, max_opps, ret = 0;
+	unsigned long rate;
+
+	rcu_read_lock();
+
+	max_opps = dev_pm_opp_get_opp_count(dev);
+	if (max_opps <= 0) {
+		ret = max_opps ? max_opps : -ENODATA;
+		goto out;
+	}
+
+	freq_table = kzalloc(sizeof(*freq_table) * (max_opps + 1), GFP_KERNEL);
+	if (!freq_table) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	for (i = 0, rate = 0; i < max_opps; i++, rate++) {
+		/* find next rate */
+		opp = dev_pm_opp_find_freq_ceil(dev, &rate);
+		if (IS_ERR(opp)) {
+			ret = PTR_ERR(opp);
+			goto out;
+		}
+		freq_table[i].driver_data = i;
+		freq_table[i].frequency = rate / 1000;
+	}
+
+	freq_table[i].driver_data = i;
+	freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+	*table = &freq_table[0];
+
+out:
+	rcu_read_unlock();
+	if (ret)
+		kfree(freq_table);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
+
+/**
+ * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
+ * @dev:	device for which we do this operation
+ * @table:	table to free
+ *
+ * Free up the table allocated by dev_pm_opp_init_cpufreq_table()
+ */
+void dev_pm_opp_free_cpufreq_table(struct device *dev,
+				   struct cpufreq_frequency_table **table)
+{
+	if (!table)
+		return;
+
+	kfree(*table);
+	*table = NULL;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
index ceee06849b9..cf117deb39b 100644
--- a/drivers/cpufreq/cpufreq_performance.c
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -12,11 +12,9 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/cpufreq.h>
 #include <linux/init.h>
-
+#include <linux/module.h>
 
 static int cpufreq_governor_performance(struct cpufreq_policy *policy,
 					unsigned int event)
@@ -44,19 +42,16 @@ struct cpufreq_governor cpufreq_gov_performance = {
 	.owner		= THIS_MODULE,
 };
 
-
 static int __init cpufreq_gov_performance_init(void)
 {
 	return cpufreq_register_governor(&cpufreq_gov_performance);
 }
 
-
 static void __exit cpufreq_gov_performance_exit(void)
 {
 	cpufreq_unregister_governor(&cpufreq_gov_performance);
 }
 
-
 MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
 MODULE_DESCRIPTION("CPUfreq policy governor 'performance'");
 MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index 2d948a17115..e3b874c235e 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -1,7 +1,7 @@
 /*
- *  linux/drivers/cpufreq/cpufreq_powersave.c
+ *  linux/drivers/cpufreq/cpufreq_powersave.c
  *
- *  Copyright (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
+ *  Copyright (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
  *
  *
  * This program is free software; you can redistribute it and/or modify
@@ -12,10 +12,9 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/cpufreq.h>
 #include <linux/init.h>
+#include <linux/module.h>
 
 static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
 					unsigned int event)
@@ -48,13 +47,11
@@ static int __init cpufreq_gov_powersave_init(void) return cpufreq_register_governor(&cpufreq_gov_powersave); } - static void __exit cpufreq_gov_powersave_exit(void) { cpufreq_unregister_governor(&cpufreq_gov_powersave); } - MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>"); MODULE_DESCRIPTION("CPUfreq policy governor 'powersave'"); MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index fb65decffa2..0cd9b4dcef9 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c @@ -9,25 +9,18 @@ * published by the Free Software Foundation. */ -#include <linux/kernel.h> -#include <linux/slab.h> #include <linux/cpu.h> -#include <linux/sysfs.h> #include <linux/cpufreq.h> #include <linux/module.h> -#include <linux/jiffies.h> -#include <linux/percpu.h> -#include <linux/kobject.h> -#include <linux/spinlock.h> -#include <linux/notifier.h> -#include <asm/cputime.h> +#include <linux/slab.h> +#include <linux/cputime.h> static spinlock_t cpufreq_stats_lock; struct cpufreq_stats { unsigned int cpu; unsigned int total_trans; - unsigned long long last_time; + unsigned long long last_time; unsigned int max_state; unsigned int state_num; unsigned int last_index; @@ -81,7 +74,7 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf) for (i = 0; i < stat->state_num; i++) { len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i], (unsigned long long) - cputime64_to_clock_t(stat->time_in_state[i])); + jiffies_64_to_clock_t(stat->time_in_state[i])); } return len; } @@ -116,7 +109,7 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf) len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ", stat->freq_table[i]); - for (j = 0; j < stat->state_num; j++) { + for (j = 0; j < stat->state_num; j++) { if (len >= PAGE_SIZE) break; len += snprintf(buf + len, PAGE_SIZE - len, "%9u ", @@ -158,76 +151,62 @@ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq) return -1; } -/* should be called late in the CPU removal sequence so that the stats - * memory is still available in case someone tries to use it. - */ -static void cpufreq_stats_free_table(unsigned int cpu) +static void __cpufreq_stats_free_table(struct cpufreq_policy *policy) { - struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu); + struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu); - if (stat) { - pr_debug("%s: Free stat table\n", __func__); - kfree(stat->time_in_state); - kfree(stat); - per_cpu(cpufreq_stats_table, cpu) = NULL; - } + if (!stat) + return; + + pr_debug("%s: Free stat table\n", __func__); + + sysfs_remove_group(&policy->kobj, &stats_attr_group); + kfree(stat->time_in_state); + kfree(stat); + per_cpu(cpufreq_stats_table, policy->cpu) = NULL; } -/* must be called early in the CPU removal sequence (before - * cpufreq_remove_dev) so that policy is still valid. 
- */ -static void cpufreq_stats_free_sysfs(unsigned int cpu) +static void cpufreq_stats_free_table(unsigned int cpu) { - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); + struct cpufreq_policy *policy; + policy = cpufreq_cpu_get(cpu); if (!policy) return; - if (!cpufreq_frequency_get_table(cpu)) - goto put_ref; + if (cpufreq_frequency_get_table(policy->cpu)) + __cpufreq_stats_free_table(policy); - if (!policy_is_shared(policy)) { - pr_debug("%s: Free sysfs stat\n", __func__); - sysfs_remove_group(&policy->kobj, &stats_attr_group); - } - -put_ref: cpufreq_cpu_put(policy); } -static int cpufreq_stats_create_table(struct cpufreq_policy *policy, - struct cpufreq_frequency_table *table) +static int __cpufreq_stats_create_table(struct cpufreq_policy *policy) { - unsigned int i, j, count = 0, ret = 0; + unsigned int i, count = 0, ret = 0; struct cpufreq_stats *stat; - struct cpufreq_policy *data; unsigned int alloc_size; unsigned int cpu = policy->cpu; + struct cpufreq_frequency_table *pos, *table; + + table = cpufreq_frequency_get_table(cpu); + if (unlikely(!table)) + return 0; + if (per_cpu(cpufreq_stats_table, cpu)) return -EBUSY; - stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL); + stat = kzalloc(sizeof(*stat), GFP_KERNEL); if ((stat) == NULL) return -ENOMEM; - data = cpufreq_cpu_get(cpu); - if (data == NULL) { - ret = -EINVAL; - goto error_get_fail; - } - - ret = sysfs_create_group(&data->kobj, &stats_attr_group); + ret = sysfs_create_group(&policy->kobj, &stats_attr_group); if (ret) goto error_out; stat->cpu = cpu; per_cpu(cpufreq_stats_table, cpu) = stat; - for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { - unsigned int freq = table[i].frequency; - if (freq == CPUFREQ_ENTRY_INVALID) - continue; + cpufreq_for_each_valid_entry(pos, table) count++; - } alloc_size = count * sizeof(int) + count * sizeof(u64); @@ -238,36 +217,48 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy, stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL); if (!stat->time_in_state) { ret = -ENOMEM; - goto error_out; + goto error_alloc; } stat->freq_table = (unsigned int *)(stat->time_in_state + count); #ifdef CONFIG_CPU_FREQ_STAT_DETAILS stat->trans_table = stat->freq_table + count; #endif - j = 0; - for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { - unsigned int freq = table[i].frequency; - if (freq == CPUFREQ_ENTRY_INVALID) - continue; - if (freq_table_get_index(stat, freq) == -1) - stat->freq_table[j++] = freq; - } - stat->state_num = j; + i = 0; + cpufreq_for_each_valid_entry(pos, table) + if (freq_table_get_index(stat, pos->frequency) == -1) + stat->freq_table[i++] = pos->frequency; + stat->state_num = i; spin_lock(&cpufreq_stats_lock); stat->last_time = get_jiffies_64(); stat->last_index = freq_table_get_index(stat, policy->cur); spin_unlock(&cpufreq_stats_lock); - cpufreq_cpu_put(data); return 0; +error_alloc: + sysfs_remove_group(&policy->kobj, &stats_attr_group); error_out: - cpufreq_cpu_put(data); -error_get_fail: kfree(stat); per_cpu(cpufreq_stats_table, cpu) = NULL; return ret; } +static void cpufreq_stats_create_table(unsigned int cpu) +{ + struct cpufreq_policy *policy; + + /* + * "likely(!policy)" because normally cpufreq_stats will be registered + * before cpufreq driver + */ + policy = cpufreq_cpu_get(cpu); + if (likely(!policy)) + return; + + __cpufreq_stats_create_table(policy); + + cpufreq_cpu_put(policy); +} + static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy) { struct cpufreq_stats *stat = 
per_cpu(cpufreq_stats_table, @@ -284,25 +275,20 @@ static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy) static int cpufreq_stat_notifier_policy(struct notifier_block *nb, unsigned long val, void *data) { - int ret; + int ret = 0; struct cpufreq_policy *policy = data; - struct cpufreq_frequency_table *table; - unsigned int cpu = policy->cpu; if (val == CPUFREQ_UPDATE_POLICY_CPU) { cpufreq_stats_update_policy_cpu(policy); return 0; } - if (val != CPUFREQ_NOTIFY) - return 0; - table = cpufreq_frequency_get_table(cpu); - if (!table) - return 0; - ret = cpufreq_stats_create_table(policy, table); - if (ret) - return ret; - return 0; + if (val == CPUFREQ_CREATE_POLICY) + ret = __cpufreq_stats_create_table(policy); + else if (val == CPUFREQ_REMOVE_POLICY) + __cpufreq_stats_free_table(policy); + + return ret; } static int cpufreq_stat_notifier_trans(struct notifier_block *nb, @@ -341,36 +327,6 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb, return 0; } -static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb, - unsigned long action, - void *hcpu) -{ - unsigned int cpu = (unsigned long)hcpu; - - switch (action) { - case CPU_ONLINE: - cpufreq_update_policy(cpu); - break; - case CPU_DOWN_PREPARE: - cpufreq_stats_free_sysfs(cpu); - break; - case CPU_DEAD: - cpufreq_stats_free_table(cpu); - break; - case CPU_UP_CANCELED_FROZEN: - cpufreq_stats_free_sysfs(cpu); - cpufreq_stats_free_table(cpu); - break; - } - return NOTIFY_OK; -} - -/* priority=1 so this will get called before cpufreq_remove_dev */ -static struct notifier_block cpufreq_stat_cpu_notifier __refdata = { - .notifier_call = cpufreq_stat_cpu_callback, - .priority = 1, -}; - static struct notifier_block notifier_policy_block = { .notifier_call = cpufreq_stat_notifier_policy }; @@ -390,16 +346,14 @@ static int __init cpufreq_stats_init(void) if (ret) return ret; - register_hotcpu_notifier(&cpufreq_stat_cpu_notifier); for_each_online_cpu(cpu) - cpufreq_update_policy(cpu); + cpufreq_stats_create_table(cpu); ret = cpufreq_register_notifier(&notifier_trans_block, CPUFREQ_TRANSITION_NOTIFIER); if (ret) { cpufreq_unregister_notifier(&notifier_policy_block, CPUFREQ_POLICY_NOTIFIER); - unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier); for_each_online_cpu(cpu) cpufreq_stats_free_table(cpu); return ret; @@ -415,11 +369,8 @@ static void __exit cpufreq_stats_exit(void) CPUFREQ_POLICY_NOTIFIER); cpufreq_unregister_notifier(&notifier_trans_block, CPUFREQ_TRANSITION_NOTIFIER); - unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier); - for_each_online_cpu(cpu) { + for_each_online_cpu(cpu) cpufreq_stats_free_table(cpu); - cpufreq_stats_free_sysfs(cpu); - } } MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>"); diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c index bbeb9c0720a..4dbf1db16ac 100644 --- a/drivers/cpufreq/cpufreq_userspace.c +++ b/drivers/cpufreq/cpufreq_userspace.c @@ -13,55 +13,13 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/smp.h> -#include <linux/init.h> -#include <linux/spinlock.h> -#include <linux/interrupt.h> #include <linux/cpufreq.h> -#include <linux/cpu.h> -#include <linux/types.h> -#include <linux/fs.h> -#include <linux/sysfs.h> +#include <linux/init.h> +#include <linux/module.h> #include <linux/mutex.h> -/** - * A few values needed by the userspace governor - */ -static DEFINE_PER_CPU(unsigned int, cpu_max_freq); -static DEFINE_PER_CPU(unsigned int, cpu_min_freq); -static
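/*
 * A hedged sketch of the policy-notifier pattern cpufreq_stats now
 * relies on: per-policy state is created on CPUFREQ_CREATE_POLICY and
 * torn down on CPUFREQ_REMOVE_POLICY, which is what lets the CPU
 * hotplug callback above be deleted. All "my_*" names are
 * illustrative, not from the patch.
 */
#include <linux/cpufreq.h>
#include <linux/printk.h>

static int my_policy_notifier(struct notifier_block *nb,
			      unsigned long val, void *data)
{
	struct cpufreq_policy *policy = data;

	if (val == CPUFREQ_CREATE_POLICY)
		pr_debug("allocate per-policy state for cpu %u\n",
			 policy->cpu);
	else if (val == CPUFREQ_REMOVE_POLICY)
		pr_debug("free per-policy state for cpu %u\n", policy->cpu);

	return 0;
}

static struct notifier_block my_policy_nb = {
	.notifier_call	= my_policy_notifier,
};

/* registered once at module init with:
 *	cpufreq_register_notifier(&my_policy_nb, CPUFREQ_POLICY_NOTIFIER);
 */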
DEFINE_PER_CPU(unsigned int, cpu_cur_freq); /* current CPU freq */ -static DEFINE_PER_CPU(unsigned int, cpu_set_freq); /* CPU freq desired by - userspace */ static DEFINE_PER_CPU(unsigned int, cpu_is_managed); - static DEFINE_MUTEX(userspace_mutex); -static int cpus_using_userspace_governor; - -/* keep track of frequency transitions */ -static int -userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val, - void *data) -{ - struct cpufreq_freqs *freq = data; - - if (!per_cpu(cpu_is_managed, freq->cpu)) - return 0; - - if (val == CPUFREQ_POSTCHANGE) { - pr_debug("saving cpu_cur_freq of cpu %u to be %u kHz\n", - freq->cpu, freq->new); - per_cpu(cpu_cur_freq, freq->cpu) = freq->new; - } - - return 0; -} - -static struct notifier_block userspace_cpufreq_notifier_block = { - .notifier_call = userspace_cpufreq_notifier -}; - /** * cpufreq_set - set the CPU frequency @@ -80,34 +38,15 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq) if (!per_cpu(cpu_is_managed, policy->cpu)) goto err; - per_cpu(cpu_set_freq, policy->cpu) = freq; - - if (freq < per_cpu(cpu_min_freq, policy->cpu)) - freq = per_cpu(cpu_min_freq, policy->cpu); - if (freq > per_cpu(cpu_max_freq, policy->cpu)) - freq = per_cpu(cpu_max_freq, policy->cpu); - - /* - * We're safe from concurrent calls to ->target() here - * as we hold the userspace_mutex lock. If we were calling - * cpufreq_driver_target, a deadlock situation might occur: - * A: cpufreq_set (lock userspace_mutex) -> - * cpufreq_driver_target(lock policy->lock) - * B: cpufreq_set_policy(lock policy->lock) -> - * __cpufreq_governor -> - * cpufreq_governor_userspace (lock userspace_mutex) - */ ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L); - err: mutex_unlock(&userspace_mutex); return ret; } - static ssize_t show_speed(struct cpufreq_policy *policy, char *buf) { - return sprintf(buf, "%u\n", per_cpu(cpu_cur_freq, policy->cpu)); + return sprintf(buf, "%u\n", policy->cur); } static int cpufreq_governor_userspace(struct cpufreq_policy *policy, @@ -119,73 +58,37 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy, switch (event) { case CPUFREQ_GOV_START: BUG_ON(!policy->cur); - mutex_lock(&userspace_mutex); - - if (cpus_using_userspace_governor == 0) { - cpufreq_register_notifier( - &userspace_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - } - cpus_using_userspace_governor++; + pr_debug("started managing cpu %u\n", cpu); + mutex_lock(&userspace_mutex); per_cpu(cpu_is_managed, cpu) = 1; - per_cpu(cpu_min_freq, cpu) = policy->min; - per_cpu(cpu_max_freq, cpu) = policy->max; - per_cpu(cpu_cur_freq, cpu) = policy->cur; - per_cpu(cpu_set_freq, cpu) = policy->cur; - pr_debug("managing cpu %u started " - "(%u - %u kHz, currently %u kHz)\n", - cpu, - per_cpu(cpu_min_freq, cpu), - per_cpu(cpu_max_freq, cpu), - per_cpu(cpu_cur_freq, cpu)); - mutex_unlock(&userspace_mutex); break; case CPUFREQ_GOV_STOP: - mutex_lock(&userspace_mutex); - cpus_using_userspace_governor--; - if (cpus_using_userspace_governor == 0) { - cpufreq_unregister_notifier( - &userspace_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - } + pr_debug("managing cpu %u stopped\n", cpu); + mutex_lock(&userspace_mutex); per_cpu(cpu_is_managed, cpu) = 0; - per_cpu(cpu_min_freq, cpu) = 0; - per_cpu(cpu_max_freq, cpu) = 0; - per_cpu(cpu_set_freq, cpu) = 0; - pr_debug("managing cpu %u stopped\n", cpu); mutex_unlock(&userspace_mutex); break; case CPUFREQ_GOV_LIMITS: mutex_lock(&userspace_mutex); - pr_debug("limit event for cpu %u: %u - 
%u kHz, " - "currently %u kHz, last set to %u kHz\n", + pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz\n", cpu, policy->min, policy->max, - per_cpu(cpu_cur_freq, cpu), - per_cpu(cpu_set_freq, cpu)); - if (policy->max < per_cpu(cpu_set_freq, cpu)) { + policy->cur); + + if (policy->max < policy->cur) __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); - } else if (policy->min > per_cpu(cpu_set_freq, cpu)) { + else if (policy->min > policy->cur) __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); - } else { - __cpufreq_driver_target(policy, - per_cpu(cpu_set_freq, cpu), - CPUFREQ_RELATION_L); - } - per_cpu(cpu_min_freq, cpu) = policy->min; - per_cpu(cpu_max_freq, cpu) = policy->max; - per_cpu(cpu_cur_freq, cpu) = policy->cur; mutex_unlock(&userspace_mutex); break; } return rc; } - #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE static #endif @@ -202,13 +105,11 @@ static int __init cpufreq_gov_userspace_init(void) return cpufreq_register_governor(&cpufreq_gov_userspace); } - static void __exit cpufreq_gov_userspace_exit(void) { cpufreq_unregister_governor(&cpufreq_gov_userspace); } - MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>, " "Russell King <rmk@arm.linux.org.uk>"); MODULE_DESCRIPTION("CPUfreq policy governor 'userspace'"); diff --git a/drivers/cpufreq/cris-artpec3-cpufreq.c b/drivers/cpufreq/cris-artpec3-cpufreq.c index ee142c49057..601b88c490c 100644 --- a/drivers/cpufreq/cris-artpec3-cpufreq.c +++ b/drivers/cpufreq/cris-artpec3-cpufreq.c @@ -15,9 +15,9 @@ static struct notifier_block cris_sdram_freq_notifier_block = { }; static struct cpufreq_frequency_table cris_freq_table[] = { - {0x01, 6000}, - {0x02, 200000}, - {0, CPUFREQ_TABLE_END}, + {0, 0x01, 6000}, + {0, 0x02, 200000}, + {0, 0, CPUFREQ_TABLE_END}, }; static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu) @@ -27,18 +27,11 @@ static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu) return clk_ctrl.pll ? 
200000 : 6000; } -static void cris_freq_set_cpu_state(struct cpufreq_policy *policy, - unsigned int state) +static int cris_freq_target(struct cpufreq_policy *policy, unsigned int state) { - struct cpufreq_freqs freqs; reg_clkgen_rw_clk_ctrl clk_ctrl; clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl); - freqs.old = cris_freq_get_cpu_frequency(policy->cpu); - freqs.new = cris_freq_table[state].frequency; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - local_irq_disable(); /* Even though we may be SMP they will share the same clock @@ -51,68 +44,21 @@ static void cris_freq_set_cpu_state(struct cpufreq_policy *policy, local_irq_enable(); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); -}; - -static int cris_freq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]); -} - -static int cris_freq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int newstate = 0; - - if (cpufreq_frequency_table_target(policy, cris_freq_table, - target_freq, relation, &newstate)) - return -EINVAL; - - cris_freq_set_cpu_state(policy, newstate); - return 0; } static int cris_freq_cpu_init(struct cpufreq_policy *policy) { - int result; - - /* cpuinfo and default policy values */ - policy->cpuinfo.transition_latency = 1000000; /* 1ms */ - policy->cur = cris_freq_get_cpu_frequency(0); - - result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table); - if (result) - return (result); - - cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu); - - return 0; -} - - -static int cris_freq_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; + return cpufreq_generic_init(policy, cris_freq_table, 1000000); } - -static struct freq_attr *cris_freq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver cris_freq_driver = { .get = cris_freq_get_cpu_frequency, - .verify = cris_freq_verify, - .target = cris_freq_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = cris_freq_target, .init = cris_freq_cpu_init, - .exit = cris_freq_cpu_exit, .name = "cris_freq", - .owner = THIS_MODULE, - .attr = cris_freq_attr, + .attr = cpufreq_generic_attr, }; static int __init cris_freq_init(void) diff --git a/drivers/cpufreq/cris-etraxfs-cpufreq.c b/drivers/cpufreq/cris-etraxfs-cpufreq.c index 12952235d5d..22b2cdde74d 100644 --- a/drivers/cpufreq/cris-etraxfs-cpufreq.c +++ b/drivers/cpufreq/cris-etraxfs-cpufreq.c @@ -15,9 +15,9 @@ static struct notifier_block cris_sdram_freq_notifier_block = { }; static struct cpufreq_frequency_table cris_freq_table[] = { - {0x01, 6000}, - {0x02, 200000}, - {0, CPUFREQ_TABLE_END}, + {0, 0x01, 6000}, + {0, 0x02, 200000}, + {0, 0, CPUFREQ_TABLE_END}, }; static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu) @@ -27,18 +27,11 @@ static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu) return clk_ctrl.pll ? 
200000 : 6000; } -static void cris_freq_set_cpu_state(struct cpufreq_policy *policy, - unsigned int state) +static int cris_freq_target(struct cpufreq_policy *policy, unsigned int state) { - struct cpufreq_freqs freqs; reg_config_rw_clk_ctrl clk_ctrl; clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl); - freqs.old = cris_freq_get_cpu_frequency(policy->cpu); - freqs.new = cris_freq_table[state].frequency; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - local_irq_disable(); /* Even though we may be SMP they will share the same clock @@ -51,65 +44,21 @@ static void cris_freq_set_cpu_state(struct cpufreq_policy *policy, local_irq_enable(); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); -}; - -static int cris_freq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]); -} - -static int cris_freq_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) -{ - unsigned int newstate = 0; - - if (cpufreq_frequency_table_target - (policy, cris_freq_table, target_freq, relation, &newstate)) - return -EINVAL; - - cris_freq_set_cpu_state(policy, newstate); - return 0; } static int cris_freq_cpu_init(struct cpufreq_policy *policy) { - int result; - - /* cpuinfo and default policy values */ - policy->cpuinfo.transition_latency = 1000000; /* 1ms */ - policy->cur = cris_freq_get_cpu_frequency(0); - - result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table); - if (result) - return (result); - - cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu); - - return 0; + return cpufreq_generic_init(policy, cris_freq_table, 1000000); } -static int cris_freq_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; -} - -static struct freq_attr *cris_freq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver cris_freq_driver = { .get = cris_freq_get_cpu_frequency, - .verify = cris_freq_verify, - .target = cris_freq_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = cris_freq_target, .init = cris_freq_cpu_init, - .exit = cris_freq_cpu_exit, .name = "cris_freq", - .owner = THIS_MODULE, - .attr = cris_freq_attr, + .attr = cpufreq_generic_attr, }; static int __init cris_freq_init(void) diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c index c33c76c360f..28a16dc6e02 100644 --- a/drivers/cpufreq/davinci-cpufreq.c +++ b/drivers/cpufreq/davinci-cpufreq.c @@ -50,9 +50,7 @@ static int davinci_verify_speed(struct cpufreq_policy *policy) if (policy->cpu) return -EINVAL; - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); - + cpufreq_verify_within_cpu_limits(policy); policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000; policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000; cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, @@ -60,63 +58,38 @@ static int davinci_verify_speed(struct cpufreq_policy *policy) return 0; } -static unsigned int davinci_getspeed(unsigned int cpu) +static int davinci_target(struct cpufreq_policy *policy, unsigned int idx) { - if (cpu) - return 0; - - return clk_get_rate(cpufreq.armclk) / 1000; -} - -static int davinci_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) -{ - int ret = 0; - unsigned int idx; - struct cpufreq_freqs freqs; struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data; 
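/*
 * Both CRIS conversions above (and the davinci one in progress here)
 * follow the same recipe: the core now does the table lookup, bounds
 * checking and PRE/POSTCHANGE notification, and hands .target_index()
 * a bare table index. A hedged skeleton of a converted driver; all
 * "demo_*" names are assumptions.
 */
#include <linux/cpufreq.h>

static struct cpufreq_frequency_table demo_table[] = {
	{0, 0, 100000},			/* {flags, driver_data, kHz} */
	{0, 1, 200000},
	{0, 0, CPUFREQ_TABLE_END},
};

static int demo_target(struct cpufreq_policy *policy, unsigned int index)
{
	/* program the hardware for demo_table[index].frequency only */
	return 0;
}

static int demo_init(struct cpufreq_policy *policy)
{
	/* validates the table and fills policy/cpuinfo limits;
	 * the last argument is the transition latency in ns */
	return cpufreq_generic_init(policy, demo_table, 100000);
}

static struct cpufreq_driver demo_driver = {
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= demo_target,
	.get		= cpufreq_generic_get,	/* requires policy->clk */
	.init		= demo_init,
	.name		= "demo",
	.attr		= cpufreq_generic_attr,
};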
struct clk *armclk = cpufreq.armclk; + unsigned int old_freq, new_freq; + int ret = 0; - freqs.old = davinci_getspeed(0); - freqs.new = clk_round_rate(armclk, target_freq * 1000) / 1000; - - if (freqs.old == freqs.new) - return ret; - - dev_dbg(cpufreq.dev, "transition: %u --> %u\n", freqs.old, freqs.new); - - ret = cpufreq_frequency_table_target(policy, pdata->freq_table, - freqs.new, relation, &idx); - if (ret) - return -EINVAL; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + old_freq = policy->cur; + new_freq = pdata->freq_table[idx].frequency; /* if moving to higher frequency, up the voltage beforehand */ - if (pdata->set_voltage && freqs.new > freqs.old) { + if (pdata->set_voltage && new_freq > old_freq) { ret = pdata->set_voltage(idx); if (ret) - goto out; + return ret; } ret = clk_set_rate(armclk, idx); if (ret) - goto out; + return ret; if (cpufreq.asyncclk) { ret = clk_set_rate(cpufreq.asyncclk, cpufreq.asyncrate); if (ret) - goto out; + return ret; } /* if moving to lower freq, lower the voltage after lowering freq */ - if (pdata->set_voltage && freqs.new < freqs.old) + if (pdata->set_voltage && new_freq < old_freq) pdata->set_voltage(idx); -out: - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - return ret; + return 0; } static int davinci_cpu_init(struct cpufreq_policy *policy) @@ -135,16 +108,7 @@ static int davinci_cpu_init(struct cpufreq_policy *policy) return result; } - policy->cur = davinci_getspeed(0); - - result = cpufreq_frequency_table_cpuinfo(policy, freq_table); - if (result) { - pr_err("%s: cpufreq_frequency_table_cpuinfo() failed", - __func__); - return result; - } - - cpufreq_frequency_table_get_attr(freq_table, policy->cpu); + policy->clk = cpufreq.armclk; /* * Time measurement across the target() function yields ~1500-1800us @@ -152,30 +116,17 @@ static int davinci_cpu_init(struct cpufreq_policy *policy) * Setting the latency to 2000 us to accommodate addition of drivers * to pre/post change notification list. 
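/*
 * davinci_target() above preserves the classic DVFS ordering: raise
 * the supply before raising the clock, lower it only after the clock
 * has dropped, so the CPU is never clocked faster than its voltage
 * allows. A generic hedged sketch of that rule (names and the single
 * voltage value are assumptions):
 */
#include <linux/clk.h>
#include <linux/regulator/consumer.h>

static int dvfs_set(struct clk *clk, struct regulator *reg,
		    unsigned long new_hz, int new_uv, unsigned long old_hz)
{
	int ret;

	if (new_hz > old_hz) {			/* scaling up: volt first */
		ret = regulator_set_voltage(reg, new_uv, new_uv);
		if (ret)
			return ret;
	}

	ret = clk_set_rate(clk, new_hz);
	if (ret)
		return ret;

	if (new_hz < old_hz)			/* scaling down: volt last */
		ret = regulator_set_voltage(reg, new_uv, new_uv);

	return ret;
}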
*/ - policy->cpuinfo.transition_latency = 2000 * 1000; - return 0; + return cpufreq_generic_init(policy, freq_table, 2000 * 1000); } -static int davinci_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; -} - -static struct freq_attr *davinci_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver davinci_driver = { - .flags = CPUFREQ_STICKY, + .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, .verify = davinci_verify_speed, - .target = davinci_target, - .get = davinci_getspeed, + .target_index = davinci_target, + .get = cpufreq_generic_get, .init = davinci_cpu_init, - .exit = davinci_cpu_exit, .name = "davinci", - .attr = davinci_cpufreq_attr, + .attr = cpufreq_generic_attr, }; static int __init davinci_cpufreq_probe(struct platform_device *pdev) diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c index 6ec6539ae04..4bebc1b5db4 100644 --- a/drivers/cpufreq/dbx500-cpufreq.c +++ b/drivers/cpufreq/dbx500-cpufreq.c @@ -19,113 +19,33 @@ static struct cpufreq_frequency_table *freq_table; static struct clk *armss_clk; -static struct freq_attr *dbx500_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - -static int dbx500_cpufreq_verify_speed(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, freq_table); -} - static int dbx500_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) + unsigned int index) { - struct cpufreq_freqs freqs; - unsigned int idx; - int ret; - - /* Lookup the next frequency */ - if (cpufreq_frequency_table_target(policy, freq_table, target_freq, - relation, &idx)) - return -EINVAL; - - freqs.old = policy->cur; - freqs.new = freq_table[idx].frequency; - - if (freqs.old == freqs.new) - return 0; - - /* pre-change notification */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - /* update armss clk frequency */ - ret = clk_set_rate(armss_clk, freqs.new * 1000); - - if (ret) { - pr_err("dbx500-cpufreq: Failed to set armss_clk to %d Hz: error %d\n", - freqs.new * 1000, ret); - return ret; - } - - /* post change notification */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - return 0; + return clk_set_rate(armss_clk, freq_table[index].frequency * 1000); } -static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu) +static int dbx500_cpufreq_init(struct cpufreq_policy *policy) { - int i = 0; - unsigned long freq = clk_get_rate(armss_clk) / 1000; - - /* The value is rounded to closest frequency in the defined table. 
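/*
 * Open-coded index walks like the one being removed here are replaced
 * throughout this patch by the table iterators. A hedged illustration;
 * the demo table contents are made up:
 */
#include <linux/cpufreq.h>
#include <linux/printk.h>

static struct cpufreq_frequency_table iter_demo[] = {
	{0, 0, 200000},
	{0, 1, CPUFREQ_ENTRY_INVALID},
	{0, 2, 800000},
	{0, 0, CPUFREQ_TABLE_END},
};

static void iter_walk(void)
{
	struct cpufreq_frequency_table *pos;

	/* visits every entry up to CPUFREQ_TABLE_END */
	cpufreq_for_each_entry(pos, iter_demo)
		pr_info("entry %td: %u kHz\n", pos - iter_demo,
			pos->frequency);

	/* additionally skips CPUFREQ_ENTRY_INVALID slots */
	cpufreq_for_each_valid_entry(pos, iter_demo)
		pr_info("valid: %u kHz\n", pos->frequency);
}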
*/ - while (freq_table[i + 1].frequency != CPUFREQ_TABLE_END) { - if (freq < freq_table[i].frequency + - (freq_table[i + 1].frequency - freq_table[i].frequency) / 2) - return freq_table[i].frequency; - i++; - } - - return freq_table[i].frequency; -} - -static int __cpuinit dbx500_cpufreq_init(struct cpufreq_policy *policy) -{ - int res; - - /* get policy fields based on the table */ - res = cpufreq_frequency_table_cpuinfo(policy, freq_table); - if (!res) - cpufreq_frequency_table_get_attr(freq_table, policy->cpu); - else { - pr_err("dbx500-cpufreq: Failed to read policy table\n"); - return res; - } - - policy->min = policy->cpuinfo.min_freq; - policy->max = policy->cpuinfo.max_freq; - policy->cur = dbx500_cpufreq_getspeed(policy->cpu); - policy->governor = CPUFREQ_DEFAULT_GOVERNOR; - - /* - * FIXME : Need to take time measurement across the target() - * function with no/some/all drivers in the notification - * list. - */ - policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */ - - /* policy sharing between dual CPUs */ - cpumask_setall(policy->cpus); - - return 0; + policy->clk = armss_clk; + return cpufreq_generic_init(policy, freq_table, 20 * 1000); } static struct cpufreq_driver dbx500_cpufreq_driver = { - .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS, - .verify = dbx500_cpufreq_verify_speed, - .target = dbx500_cpufreq_target, - .get = dbx500_cpufreq_getspeed, + .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS | + CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = dbx500_cpufreq_target, + .get = cpufreq_generic_get, .init = dbx500_cpufreq_init, .name = "DBX500", - .attr = dbx500_cpufreq_attr, + .attr = cpufreq_generic_attr, }; static int dbx500_cpufreq_probe(struct platform_device *pdev) { - int i = 0; + struct cpufreq_frequency_table *pos; freq_table = dev_get_platdata(&pdev->dev); if (!freq_table) { @@ -140,10 +60,8 @@ static int dbx500_cpufreq_probe(struct platform_device *pdev) } pr_info("dbx500-cpufreq: Available frequencies:\n"); - while (freq_table[i].frequency != CPUFREQ_TABLE_END) { - pr_info(" %d Mhz\n", freq_table[i].frequency/1000); - i++; - } + cpufreq_for_each_entry(pos, freq_table) + pr_info(" %d Mhz\n", pos->frequency / 1000); return cpufreq_register_driver(&dbx500_cpufreq_driver); } diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c index 37380fb9262..a0d2a423cea 100644 --- a/drivers/cpufreq/e_powersaver.c +++ b/drivers/cpufreq/e_powersaver.c @@ -54,7 +54,7 @@ static struct acpi_processor_performance *eps_acpi_cpu_perf; /* Minimum necessary to get acpi_processor_get_bios_limit() working */ static int eps_acpi_init(void) { - eps_acpi_cpu_perf = kzalloc(sizeof(struct acpi_processor_performance), + eps_acpi_cpu_perf = kzalloc(sizeof(*eps_acpi_cpu_perf), GFP_KERNEL); if (!eps_acpi_cpu_perf) return -ENOMEM; @@ -107,15 +107,9 @@ static int eps_set_state(struct eps_cpu_data *centaur, struct cpufreq_policy *policy, u32 dest_state) { - struct cpufreq_freqs freqs; u32 lo, hi; - int err = 0; int i; - freqs.old = eps_get(policy->cpu); - freqs.new = centaur->fsb * ((dest_state >> 8) & 0xff); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - /* Wait while CPU is busy */ rdmsr(MSR_IA32_PERF_STATUS, lo, hi); i = 0; @@ -124,8 +118,7 @@ static int eps_set_state(struct eps_cpu_data *centaur, rdmsr(MSR_IA32_PERF_STATUS, lo, hi); i++; if (unlikely(i > 64)) { - err = -ENODEV; - goto postchange; + return -ENODEV; } } /* Set new multiplier and voltage */ @@ -137,16 +130,10 @@ static int 
eps_set_state(struct eps_cpu_data *centaur, rdmsr(MSR_IA32_PERF_STATUS, lo, hi); i++; if (unlikely(i > 64)) { - err = -ENODEV; - goto postchange; + return -ENODEV; } } while (lo & ((1 << 16) | (1 << 17))); - /* Return current frequency */ -postchange: - rdmsr(MSR_IA32_PERF_STATUS, lo, hi); - freqs.new = centaur->fsb * ((lo >> 8) & 0xff); - #ifdef DEBUG { u8 current_multiplier, current_voltage; @@ -161,16 +148,12 @@ postchange: current_multiplier); } #endif - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - return err; + return 0; } -static int eps_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) +static int eps_target(struct cpufreq_policy *policy, unsigned int index) { struct eps_cpu_data *centaur; - unsigned int newstate = 0; unsigned int cpu = policy->cpu; unsigned int dest_state; int ret; @@ -179,28 +162,14 @@ static int eps_target(struct cpufreq_policy *policy, return -ENODEV; centaur = eps_cpu[cpu]; - if (unlikely(cpufreq_frequency_table_target(policy, - &eps_cpu[cpu]->freq_table[0], - target_freq, - relation, - &newstate))) { - return -EINVAL; - } - /* Make frequency transition */ - dest_state = centaur->freq_table[newstate].index & 0xffff; + dest_state = centaur->freq_table[index].driver_data & 0xffff; ret = eps_set_state(centaur, policy, dest_state); if (ret) printk(KERN_ERR "eps: Timeout!\n"); return ret; } -static int eps_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, - &eps_cpu[policy->cpu]->freq_table[0]); -} - static int eps_cpu_init(struct cpufreq_policy *policy) { unsigned int i; @@ -363,7 +332,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy) states = 2; /* Allocate private data and frequency table for current cpu */ - centaur = kzalloc(sizeof(struct eps_cpu_data) + centaur = kzalloc(sizeof(*centaur) + (states + 1) * sizeof(struct cpufreq_frequency_table), GFP_KERNEL); if (!centaur) @@ -380,9 +349,9 @@ static int eps_cpu_init(struct cpufreq_policy *policy) f_table = &centaur->freq_table[0]; if (brand != EPS_BRAND_C7M) { f_table[0].frequency = fsb * min_multiplier; - f_table[0].index = (min_multiplier << 8) | min_voltage; + f_table[0].driver_data = (min_multiplier << 8) | min_voltage; f_table[1].frequency = fsb * max_multiplier; - f_table[1].index = (max_multiplier << 8) | max_voltage; + f_table[1].driver_data = (max_multiplier << 8) | max_voltage; f_table[2].frequency = CPUFREQ_TABLE_END; } else { k = 0; @@ -391,22 +360,20 @@ static int eps_cpu_init(struct cpufreq_policy *policy) for (i = min_multiplier; i <= max_multiplier; i++) { voltage = (k * step) / 256 + min_voltage; f_table[k].frequency = fsb * i; - f_table[k].index = (i << 8) | voltage; + f_table[k].driver_data = (i << 8) | voltage; k++; } f_table[k].frequency = CPUFREQ_TABLE_END; } policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */ - policy->cur = fsb * current_multiplier; - ret = cpufreq_frequency_table_cpuinfo(policy, &centaur->freq_table[0]); + ret = cpufreq_table_validate_and_show(policy, &centaur->freq_table[0]); if (ret) { kfree(centaur); return ret; } - cpufreq_frequency_table_get_attr(&centaur->freq_table[0], policy->cpu); return 0; } @@ -415,26 +382,19 @@ static int eps_cpu_exit(struct cpufreq_policy *policy) unsigned int cpu = policy->cpu; /* Bye */ - cpufreq_frequency_table_put_attr(policy->cpu); kfree(eps_cpu[cpu]); eps_cpu[cpu] = NULL; return 0; } -static struct freq_attr *eps_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver eps_driver =
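/*
 * e_powersaver packs a multiplier/voltage pair into each entry's
 * driver_data, as the (multiplier << 8) | voltage expressions above
 * show; .target_index() then masks it out as dest_state. A hedged
 * sketch of that encoding convention (helper names are assumptions):
 */
static inline unsigned int eps_pack_state(unsigned int mult,
					  unsigned int volt)
{
	return (mult << 8) | volt;	/* bits 15:8 mult, bits 7:0 volt */
}

static inline unsigned int eps_state_mult(unsigned int state)
{
	return (state >> 8) & 0xff;
}

static inline unsigned int eps_state_volt(unsigned int state)
{
	return state & 0xff;
}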
{ - .verify = eps_verify, - .target = eps_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = eps_target, .init = eps_cpu_init, .exit = eps_cpu_exit, .get = eps_get, .name = "e_powersaver", - .owner = THIS_MODULE, - .attr = eps_attr, + .attr = cpufreq_generic_attr, }; diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c index 658d860344b..1c06e786c9b 100644 --- a/drivers/cpufreq/elanfreq.c +++ b/drivers/cpufreq/elanfreq.c @@ -56,15 +56,15 @@ static struct s_elan_multiplier elan_multiplier[] = { }; static struct cpufreq_frequency_table elanfreq_table[] = { - {0, 1000}, - {1, 2000}, - {2, 4000}, - {3, 8000}, - {4, 16000}, - {5, 33000}, - {6, 66000}, - {7, 99000}, - {0, CPUFREQ_TABLE_END}, + {0, 0, 1000}, + {0, 1, 2000}, + {0, 2, 4000}, + {0, 3, 8000}, + {0, 4, 16000}, + {0, 5, 33000}, + {0, 6, 66000}, + {0, 7, 99000}, + {0, 0, CPUFREQ_TABLE_END}, }; @@ -105,32 +105,9 @@ static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu) } -/** - * elanfreq_set_cpu_frequency: Change the CPU core frequency - * @cpu: cpu number - * @freq: frequency in kHz - * - * This function takes a frequency value and changes the CPU frequency - * according to this. Note that the frequency has to be checked by - * elanfreq_validatespeed() for correctness! - * - * There is no return value. - */ - -static void elanfreq_set_cpu_state(struct cpufreq_policy *policy, - unsigned int state) +static int elanfreq_target(struct cpufreq_policy *policy, + unsigned int state) { - struct cpufreq_freqs freqs; - - freqs.old = elanfreq_get_cpu_frequency(0); - freqs.new = elan_multiplier[state].clock; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - - printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n", - elan_multiplier[state].clock); - - /* * Access to the Elan's internal registers is indexed via * 0x22: Chip Setup & Control Register Index Register (CSCI) @@ -161,39 +138,8 @@ static void elanfreq_set_cpu_state(struct cpufreq_policy *policy, udelay(10000); local_irq_enable(); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); -}; - - -/** - * elanfreq_validatespeed: test if frequency range is valid - * @policy: the policy to validate - * - * This function checks if a given frequency range in kHz is valid - * for the hardware supported by the driver. 
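/*
 * The elanfreq and cris tables above gain a leading field because
 * struct cpufreq_frequency_table is now { flags, driver_data,
 * frequency }; the flags slot is what carries CPUFREQ_BOOST_FREQ for
 * over-clocked entries (used by exynos4x12 later in this patch). The
 * same shape written with designated initializers, values made up:
 */
#include <linux/cpufreq.h>

static struct cpufreq_frequency_table layout_demo[] = {
	{ .flags = 0, .driver_data = 0, .frequency = 500000 },
	{ .flags = CPUFREQ_BOOST_FREQ, .driver_data = 1,
	  .frequency = 1500000 },	/* only usable when boost is on */
	{ .frequency = CPUFREQ_TABLE_END },
};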
- */ - -static int elanfreq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, &elanfreq_table[0]); -} - -static int elanfreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int newstate = 0; - - if (cpufreq_frequency_table_target(policy, &elanfreq_table[0], - target_freq, relation, &newstate)) - return -EINVAL; - - elanfreq_set_cpu_state(policy, newstate); - return 0; } - - /* * Module init and exit code */ @@ -201,8 +147,7 @@ static int elanfreq_target(struct cpufreq_policy *policy, static int elanfreq_cpu_init(struct cpufreq_policy *policy) { struct cpuinfo_x86 *c = &cpu_data(0); - unsigned int i; - int result; + struct cpufreq_frequency_table *pos; /* capability check */ if ((c->x86_vendor != X86_VENDOR_AMD) || @@ -214,28 +159,14 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy) max_freq = elanfreq_get_cpu_frequency(0); /* table init */ - for (i = 0; (elanfreq_table[i].frequency != CPUFREQ_TABLE_END); i++) { - if (elanfreq_table[i].frequency > max_freq) - elanfreq_table[i].frequency = CPUFREQ_ENTRY_INVALID; - } + cpufreq_for_each_entry(pos, elanfreq_table) + if (pos->frequency > max_freq) + pos->frequency = CPUFREQ_ENTRY_INVALID; /* cpuinfo and default policy values */ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; - policy->cur = elanfreq_get_cpu_frequency(0); - - result = cpufreq_frequency_table_cpuinfo(policy, elanfreq_table); - if (result) - return result; - cpufreq_frequency_table_get_attr(elanfreq_table, policy->cpu); - return 0; -} - - -static int elanfreq_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; + return cpufreq_table_validate_and_show(policy, elanfreq_table); } @@ -261,21 +192,13 @@ __setup("elanfreq=", elanfreq_setup); #endif -static struct freq_attr *elanfreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - - static struct cpufreq_driver elanfreq_driver = { .get = elanfreq_get_cpu_frequency, - .verify = elanfreq_verify, - .target = elanfreq_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = elanfreq_target, .init = elanfreq_cpu_init, - .exit = elanfreq_cpu_exit, .name = "elanfreq", - .owner = THIS_MODULE, - .attr = elanfreq_attr, + .attr = cpufreq_generic_attr, }; static const struct x86_cpu_id elan_id[] = { diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c index 475b4f607f0..1e0ec57bf6e 100644 --- a/drivers/cpufreq/exynos-cpufreq.c +++ b/drivers/cpufreq/exynos-cpufreq.c @@ -16,46 +16,28 @@ #include <linux/slab.h> #include <linux/regulator/consumer.h> #include <linux/cpufreq.h> -#include <linux/suspend.h> - -#include <plat/cpu.h> +#include <linux/platform_device.h> +#include <linux/of.h> #include "exynos-cpufreq.h" static struct exynos_dvfs_info *exynos_info; - static struct regulator *arm_regulator; -static struct cpufreq_freqs freqs; - static unsigned int locking_frequency; -static bool frequency_locked; -static DEFINE_MUTEX(cpufreq_lock); - -static int exynos_verify_speed(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, - exynos_info->freq_table); -} - -static unsigned int exynos_getspeed(unsigned int cpu) -{ - return clk_get_rate(exynos_info->cpu_clk) / 1000; -} static int exynos_cpufreq_get_index(unsigned int freq) { struct cpufreq_frequency_table *freq_table = exynos_info->freq_table; - int index; + struct cpufreq_frequency_table *pos; - for (index = 0; - freq_table[index].frequency 
!= CPUFREQ_TABLE_END; index++) - if (freq_table[index].frequency == freq) + cpufreq_for_each_entry(pos, freq_table) + if (pos->frequency == freq) break; - if (freq_table[index].frequency == CPUFREQ_TABLE_END) + if (pos->frequency == CPUFREQ_TABLE_END) return -EINVAL; - return index; + return pos - freq_table; } static int exynos_cpufreq_scale(unsigned int target_freq) @@ -65,21 +47,19 @@ static int exynos_cpufreq_scale(unsigned int target_freq) struct cpufreq_policy *policy = cpufreq_cpu_get(0); unsigned int arm_volt, safe_arm_volt = 0; unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz; + struct device *dev = exynos_info->dev; + unsigned int old_freq; int index, old_index; int ret = 0; - freqs.old = policy->cur; - freqs.new = target_freq; - - if (freqs.new == freqs.old) - goto out; + old_freq = policy->cur; /* * The policy max have been changed so that we cannot get proper * old_index with cpufreq_frequency_table_target(). Thus, ignore - * policy and get the index from the raw freqeuncy table. + * policy and get the index from the raw frequency table. */ - old_index = exynos_cpufreq_get_index(freqs.old); + old_index = exynos_cpufreq_get_index(old_freq); if (old_index < 0) { ret = old_index; goto out; @@ -104,16 +84,14 @@ static int exynos_cpufreq_scale(unsigned int target_freq) } arm_volt = volt_table[index]; - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - /* When the new frequency is higher than current frequency */ - if ((freqs.new > freqs.old) && !safe_arm_volt) { + if ((target_freq > old_freq) && !safe_arm_volt) { /* Firstly, voltage up to increase frequency */ ret = regulator_set_voltage(arm_regulator, arm_volt, arm_volt); if (ret) { - pr_err("%s: failed to set cpu voltage to %d\n", - __func__, arm_volt); - goto out; + dev_err(dev, "failed to set cpu voltage to %d\n", + arm_volt); + return ret; } } @@ -121,212 +99,120 @@ static int exynos_cpufreq_scale(unsigned int target_freq) ret = regulator_set_voltage(arm_regulator, safe_arm_volt, safe_arm_volt); if (ret) { - pr_err("%s: failed to set cpu voltage to %d\n", - __func__, safe_arm_volt); - goto out; + dev_err(dev, "failed to set cpu voltage to %d\n", + safe_arm_volt); + return ret; } } exynos_info->set_freq(old_index, index); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - /* When the new frequency is lower than current frequency */ - if ((freqs.new < freqs.old) || - ((freqs.new > freqs.old) && safe_arm_volt)) { + if ((target_freq < old_freq) || + ((target_freq > old_freq) && safe_arm_volt)) { /* down the voltage after frequency change */ - regulator_set_voltage(arm_regulator, arm_volt, + ret = regulator_set_voltage(arm_regulator, arm_volt, arm_volt); if (ret) { - pr_err("%s: failed to set cpu voltage to %d\n", - __func__, arm_volt); + dev_err(dev, "failed to set cpu voltage to %d\n", + arm_volt); goto out; } } out: - cpufreq_cpu_put(policy); return ret; } -static int exynos_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - struct cpufreq_frequency_table *freq_table = exynos_info->freq_table; - unsigned int index; - unsigned int new_freq; - int ret = 0; - - mutex_lock(&cpufreq_lock); - - if (frequency_locked) - goto out; - - if (cpufreq_frequency_table_target(policy, freq_table, - target_freq, relation, &index)) { - ret = -EINVAL; - goto out; - } - - new_freq = freq_table[index].frequency; - - ret = exynos_cpufreq_scale(new_freq); - -out: - mutex_unlock(&cpufreq_lock); - - return ret; -} - -#ifdef CONFIG_PM -static int 
exynos_cpufreq_suspend(struct cpufreq_policy *policy) -{ - return 0; -} - -static int exynos_cpufreq_resume(struct cpufreq_policy *policy) +static int exynos_target(struct cpufreq_policy *policy, unsigned int index) { - return 0; -} -#endif - -/** - * exynos_cpufreq_pm_notifier - block CPUFREQ's activities in suspend-resume - * context - * @notifier - * @pm_event - * @v - * - * While frequency_locked == true, target() ignores every frequency but - * locking_frequency. The locking_frequency value is the initial frequency, - * which is set by the bootloader. In order to eliminate possible - * inconsistency in clock values, we save and restore frequencies during - * suspend and resume and block CPUFREQ activities. Note that the standard - * suspend/resume cannot be used as they are too deep (syscore_ops) for - * regulator actions. - */ -static int exynos_cpufreq_pm_notifier(struct notifier_block *notifier, - unsigned long pm_event, void *v) -{ - int ret; - - switch (pm_event) { - case PM_SUSPEND_PREPARE: - mutex_lock(&cpufreq_lock); - frequency_locked = true; - mutex_unlock(&cpufreq_lock); - - ret = exynos_cpufreq_scale(locking_frequency); - if (ret < 0) - return NOTIFY_BAD; - - break; - - case PM_POST_SUSPEND: - mutex_lock(&cpufreq_lock); - frequency_locked = false; - mutex_unlock(&cpufreq_lock); - break; - } - - return NOTIFY_OK; + return exynos_cpufreq_scale(exynos_info->freq_table[index].frequency); } -static struct notifier_block exynos_cpufreq_nb = { - .notifier_call = exynos_cpufreq_pm_notifier, -}; - static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) { - policy->cur = policy->min = policy->max = exynos_getspeed(policy->cpu); - - cpufreq_frequency_table_get_attr(exynos_info->freq_table, policy->cpu); - - /* set the transition latency value */ - policy->cpuinfo.transition_latency = 100000; - - cpumask_setall(policy->cpus); - - return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table); -} - -static int exynos_cpufreq_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; + policy->clk = exynos_info->cpu_clk; + policy->suspend_freq = locking_frequency; + return cpufreq_generic_init(policy, exynos_info->freq_table, 100000); } -static struct freq_attr *exynos_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver exynos_driver = { - .flags = CPUFREQ_STICKY, - .verify = exynos_verify_speed, - .target = exynos_target, - .get = exynos_getspeed, + .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = exynos_target, + .get = cpufreq_generic_get, .init = exynos_cpufreq_cpu_init, - .exit = exynos_cpufreq_cpu_exit, .name = "exynos_cpufreq", - .attr = exynos_cpufreq_attr, + .attr = cpufreq_generic_attr, +#ifdef CONFIG_ARM_EXYNOS_CPU_FREQ_BOOST_SW + .boost_supported = true, +#endif #ifdef CONFIG_PM - .suspend = exynos_cpufreq_suspend, - .resume = exynos_cpufreq_resume, + .suspend = cpufreq_generic_suspend, #endif }; -static int __init exynos_cpufreq_init(void) +static int exynos_cpufreq_probe(struct platform_device *pdev) { int ret = -EINVAL; - exynos_info = kzalloc(sizeof(struct exynos_dvfs_info), GFP_KERNEL); + exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL); if (!exynos_info) return -ENOMEM; - if (soc_is_exynos4210()) + exynos_info->dev = &pdev->dev; + + if (of_machine_is_compatible("samsung,exynos4210")) { + exynos_info->type = EXYNOS_SOC_4210; ret = 
exynos4210_cpufreq_init(exynos_info); - else if (soc_is_exynos4212() || soc_is_exynos4412()) + } else if (of_machine_is_compatible("samsung,exynos4212")) { + exynos_info->type = EXYNOS_SOC_4212; ret = exynos4x12_cpufreq_init(exynos_info); - else if (soc_is_exynos5250()) + } else if (of_machine_is_compatible("samsung,exynos4412")) { + exynos_info->type = EXYNOS_SOC_4412; + ret = exynos4x12_cpufreq_init(exynos_info); + } else if (of_machine_is_compatible("samsung,exynos5250")) { + exynos_info->type = EXYNOS_SOC_5250; ret = exynos5250_cpufreq_init(exynos_info); - else - return 0; + } else { + pr_err("%s: Unknown SoC type\n", __func__); + return -ENODEV; + } if (ret) goto err_vdd_arm; if (exynos_info->set_freq == NULL) { - pr_err("%s: No set_freq function (ERR)\n", __func__); + dev_err(&pdev->dev, "No set_freq function (ERR)\n"); goto err_vdd_arm; } arm_regulator = regulator_get(NULL, "vdd_arm"); if (IS_ERR(arm_regulator)) { - pr_err("%s: failed to get resource vdd_arm\n", __func__); + dev_err(&pdev->dev, "failed to get resource vdd_arm\n"); goto err_vdd_arm; } - locking_frequency = exynos_getspeed(0); + /* Done here as we want to capture boot frequency */ + locking_frequency = clk_get_rate(exynos_info->cpu_clk) / 1000; - register_pm_notifier(&exynos_cpufreq_nb); - - if (cpufreq_register_driver(&exynos_driver)) { - pr_err("%s: failed to register cpufreq driver\n", __func__); - goto err_cpufreq; - } - - return 0; -err_cpufreq: - unregister_pm_notifier(&exynos_cpufreq_nb); + if (!cpufreq_register_driver(&exynos_driver)) + return 0; + dev_err(&pdev->dev, "failed to register cpufreq driver\n"); regulator_put(arm_regulator); err_vdd_arm: kfree(exynos_info); - pr_debug("%s: failed initialization\n", __func__); return -EINVAL; } -late_initcall(exynos_cpufreq_init); + +static struct platform_driver exynos_cpufreq_platdrv = { + .driver = { + .name = "exynos-cpufreq", + .owner = THIS_MODULE, + }, + .probe = exynos_cpufreq_probe, +}; +module_platform_driver(exynos_cpufreq_platdrv); diff --git a/drivers/cpufreq/exynos-cpufreq.h b/drivers/cpufreq/exynos-cpufreq.h index 92b852ee5dd..9f2062a7cc0 100644 --- a/drivers/cpufreq/exynos-cpufreq.h +++ b/drivers/cpufreq/exynos-cpufreq.h @@ -17,6 +17,13 @@ enum cpufreq_level_index { L20, }; +enum exynos_soc_type { + EXYNOS_SOC_4210, + EXYNOS_SOC_4212, + EXYNOS_SOC_4412, + EXYNOS_SOC_5250, +}; + #define APLL_FREQ(f, a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, m, p, s) \ { \ .freq = (f) * 1000, \ @@ -34,6 +41,8 @@ struct apll_freq { }; struct exynos_dvfs_info { + enum exynos_soc_type type; + struct device *dev; unsigned long mpll_freq_khz; unsigned int pll_safe_idx; struct clk *cpu_clk; @@ -41,8 +50,49 @@ struct exynos_dvfs_info { struct cpufreq_frequency_table *freq_table; void (*set_freq)(unsigned int, unsigned int); bool (*need_apll_change)(unsigned int, unsigned int); + void __iomem *cmu_regs; }; +#ifdef CONFIG_ARM_EXYNOS4210_CPUFREQ extern int exynos4210_cpufreq_init(struct exynos_dvfs_info *); +#else +static inline int exynos4210_cpufreq_init(struct exynos_dvfs_info *info) +{ + return -EOPNOTSUPP; +} +#endif +#ifdef CONFIG_ARM_EXYNOS4X12_CPUFREQ extern int exynos4x12_cpufreq_init(struct exynos_dvfs_info *); +#else +static inline int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info) +{ + return -EOPNOTSUPP; +} +#endif +#ifdef CONFIG_ARM_EXYNOS5250_CPUFREQ extern int exynos5250_cpufreq_init(struct exynos_dvfs_info *); +#else +static inline int exynos5250_cpufreq_init(struct exynos_dvfs_info *info) +{ + return -EOPNOTSUPP; +} +#endif + +#define 
EXYNOS4_CLKSRC_CPU 0x14200 +#define EXYNOS4_CLKMUX_STATCPU 0x14400 + +#define EXYNOS4_CLKDIV_CPU 0x14500 +#define EXYNOS4_CLKDIV_CPU1 0x14504 +#define EXYNOS4_CLKDIV_STATCPU 0x14600 +#define EXYNOS4_CLKDIV_STATCPU1 0x14604 + +#define EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT (16) +#define EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK (0x7 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT) + +#define EXYNOS5_APLL_LOCK 0x00000 +#define EXYNOS5_APLL_CON0 0x00100 +#define EXYNOS5_CLKMUX_STATCPU 0x00400 +#define EXYNOS5_CLKDIV_CPU0 0x00500 +#define EXYNOS5_CLKDIV_CPU1 0x00504 +#define EXYNOS5_CLKDIV_STATCPU0 0x00600 +#define EXYNOS5_CLKDIV_STATCPU1 0x00604 diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c index add7fbec4fc..61a54310a1b 100644 --- a/drivers/cpufreq/exynos4210-cpufreq.c +++ b/drivers/cpufreq/exynos4210-cpufreq.c @@ -16,8 +16,8 @@ #include <linux/io.h> #include <linux/slab.h> #include <linux/cpufreq.h> - -#include <mach/regs-clock.h> +#include <linux/of.h> +#include <linux/of_address.h> #include "exynos-cpufreq.h" @@ -25,18 +25,19 @@ static struct clk *cpu_clk; static struct clk *moutcore; static struct clk *mout_mpll; static struct clk *mout_apll; +static struct exynos_dvfs_info *cpufreq; static unsigned int exynos4210_volt_table[] = { 1250000, 1150000, 1050000, 975000, 950000, }; static struct cpufreq_frequency_table exynos4210_freq_table[] = { - {L0, 1200 * 1000}, - {L1, 1000 * 1000}, - {L2, 800 * 1000}, - {L3, 500 * 1000}, - {L4, 200 * 1000}, - {0, CPUFREQ_TABLE_END}, + {0, L0, 1200 * 1000}, + {0, L1, 1000 * 1000}, + {0, L2, 800 * 1000}, + {0, L3, 500 * 1000}, + {0, L4, 200 * 1000}, + {0, 0, CPUFREQ_TABLE_END}, }; static struct apll_freq apll_freq_4210[] = { @@ -62,113 +63,85 @@ static void exynos4210_set_clkdiv(unsigned int div_index) tmp = apll_freq_4210[div_index].clk_div_cpu0; - __raw_writel(tmp, EXYNOS4_CLKDIV_CPU); + __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU); do { - tmp = __raw_readl(EXYNOS4_CLKDIV_STATCPU); + tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU); } while (tmp & 0x1111111); /* Change Divider - CPU1 */ tmp = apll_freq_4210[div_index].clk_div_cpu1; - __raw_writel(tmp, EXYNOS4_CLKDIV_CPU1); + __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU1); do { - tmp = __raw_readl(EXYNOS4_CLKDIV_STATCPU1); + tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU1); } while (tmp & 0x11); } static void exynos4210_set_apll(unsigned int index) { - unsigned int tmp; + unsigned int tmp, freq = apll_freq_4210[index].freq; - /* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */ + /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */ clk_set_parent(moutcore, mout_mpll); do { - tmp = (__raw_readl(EXYNOS4_CLKMUX_STATCPU) + tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU) >> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT); tmp &= 0x7; } while (tmp != 0x2); - /* 2. Set APLL Lock time */ - __raw_writel(EXYNOS4_APLL_LOCKTIME, EXYNOS4_APLL_LOCK); - - /* 3. Change PLL PMS values */ - tmp = __raw_readl(EXYNOS4_APLL_CON0); - tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0)); - tmp |= apll_freq_4210[index].mps; - __raw_writel(tmp, EXYNOS4_APLL_CON0); - - /* 4. wait_lock_time */ - do { - tmp = __raw_readl(EXYNOS4_APLL_CON0); - } while (!(tmp & (0x1 << EXYNOS4_APLLCON0_LOCKED_SHIFT))); + clk_set_rate(mout_apll, freq * 1000); - /* 5. 
MUX_CORE_SEL = APLL */ + /* MUX_CORE_SEL = APLL */ clk_set_parent(moutcore, mout_apll); do { - tmp = __raw_readl(EXYNOS4_CLKMUX_STATCPU); + tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU); tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK; } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)); } -static bool exynos4210_pms_change(unsigned int old_index, unsigned int new_index) -{ - unsigned int old_pm = apll_freq_4210[old_index].mps >> 8; - unsigned int new_pm = apll_freq_4210[new_index].mps >> 8; - - return (old_pm == new_pm) ? 0 : 1; -} - static void exynos4210_set_frequency(unsigned int old_index, unsigned int new_index) { - unsigned int tmp; - if (old_index > new_index) { - if (!exynos4210_pms_change(old_index, new_index)) { - /* 1. Change the system clock divider values */ - exynos4210_set_clkdiv(new_index); - - /* 2. Change just s value in apll m,p,s value */ - tmp = __raw_readl(EXYNOS4_APLL_CON0); - tmp &= ~(0x7 << 0); - tmp |= apll_freq_4210[new_index].mps & 0x7; - __raw_writel(tmp, EXYNOS4_APLL_CON0); - } else { - /* Clock Configuration Procedure */ - /* 1. Change the system clock divider values */ - exynos4210_set_clkdiv(new_index); - /* 2. Change the apll m,p,s value */ - exynos4210_set_apll(new_index); - } + exynos4210_set_clkdiv(new_index); + exynos4210_set_apll(new_index); } else if (old_index < new_index) { - if (!exynos4210_pms_change(old_index, new_index)) { - /* 1. Change just s value in apll m,p,s value */ - tmp = __raw_readl(EXYNOS4_APLL_CON0); - tmp &= ~(0x7 << 0); - tmp |= apll_freq_4210[new_index].mps & 0x7; - __raw_writel(tmp, EXYNOS4_APLL_CON0); - - /* 2. Change the system clock divider values */ - exynos4210_set_clkdiv(new_index); - } else { - /* Clock Configuration Procedure */ - /* 1. Change the apll m,p,s value */ - exynos4210_set_apll(new_index); - /* 2. Change the system clock divider values */ - exynos4210_set_clkdiv(new_index); - } + exynos4210_set_apll(new_index); + exynos4210_set_clkdiv(new_index); } } int exynos4210_cpufreq_init(struct exynos_dvfs_info *info) { + struct device_node *np; unsigned long rate; + /* + * HACK: This is a temporary workaround to get access to clock + * controller registers directly and remove static mappings and + * dependencies on platform headers. It is necessary to enable + * Exynos multi-platform support and will be removed together with + * this whole driver as soon as Exynos gets migrated to use + * cpufreq-cpu0 driver. 
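/*
 * The of_find_compatible_node()/of_iomap() pair in the workaround
 * above is the stock way to reach a DT-described register block
 * without a platform header. A hedged sketch (the compatible string
 * is a parameter here); note it also drops the node reference, which
 * the temporary hack above does not bother to do:
 */
#include <linux/of.h>
#include <linux/of_address.h>

static void __iomem *map_by_compatible(const char *compat)
{
	struct device_node *np;
	void __iomem *regs;

	np = of_find_compatible_node(NULL, NULL, compat);
	if (!np)
		return NULL;

	regs = of_iomap(np, 0);		/* map "reg" entry 0 */
	of_node_put(np);		/* of_iomap() takes no ownership */
	return regs;
}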
+ */ + np = of_find_compatible_node(NULL, NULL, "samsung,exynos4210-clock"); + if (!np) { + pr_err("%s: failed to find clock controller DT node\n", + __func__); + return -ENODEV; + } + + info->cmu_regs = of_iomap(np, 0); + if (!info->cmu_regs) { + pr_err("%s: failed to map CMU registers\n", __func__); + return -EFAULT; + } + cpu_clk = clk_get(NULL, "armclk"); if (IS_ERR(cpu_clk)) return PTR_ERR(cpu_clk); @@ -194,7 +167,8 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info) info->volt_table = exynos4210_volt_table; info->freq_table = exynos4210_freq_table; info->set_freq = exynos4210_set_frequency; - info->need_apll_change = exynos4210_pms_change; + + cpufreq = info; return 0; @@ -208,4 +182,3 @@ err_moutcore: pr_debug("%s: failed initialization\n", __func__); return -EINVAL; } -EXPORT_SYMBOL(exynos4210_cpufreq_init); diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c index 08b7477b0aa..351a2074cfe 100644 --- a/drivers/cpufreq/exynos4x12-cpufreq.c +++ b/drivers/cpufreq/exynos4x12-cpufreq.c @@ -16,8 +16,8 @@ #include <linux/io.h> #include <linux/slab.h> #include <linux/cpufreq.h> - -#include <mach/regs-clock.h> +#include <linux/of.h> +#include <linux/of_address.h> #include "exynos-cpufreq.h" @@ -25,6 +25,7 @@ static struct clk *cpu_clk; static struct clk *moutcore; static struct clk *mout_mpll; static struct clk *mout_apll; +static struct exynos_dvfs_info *cpufreq; static unsigned int exynos4x12_volt_table[] = { 1350000, 1287500, 1250000, 1187500, 1137500, 1087500, 1037500, @@ -32,21 +33,21 @@ static unsigned int exynos4x12_volt_table[] = { }; static struct cpufreq_frequency_table exynos4x12_freq_table[] = { - {L0, CPUFREQ_ENTRY_INVALID}, - {L1, 1400 * 1000}, - {L2, 1300 * 1000}, - {L3, 1200 * 1000}, - {L4, 1100 * 1000}, - {L5, 1000 * 1000}, - {L6, 900 * 1000}, - {L7, 800 * 1000}, - {L8, 700 * 1000}, - {L9, 600 * 1000}, - {L10, 500 * 1000}, - {L11, 400 * 1000}, - {L12, 300 * 1000}, - {L13, 200 * 1000}, - {0, CPUFREQ_TABLE_END}, + {CPUFREQ_BOOST_FREQ, L0, 1500 * 1000}, + {0, L1, 1400 * 1000}, + {0, L2, 1300 * 1000}, + {0, L3, 1200 * 1000}, + {0, L4, 1100 * 1000}, + {0, L5, 1000 * 1000}, + {0, L6, 900 * 1000}, + {0, L7, 800 * 1000}, + {0, L8, 700 * 1000}, + {0, L9, 600 * 1000}, + {0, L10, 500 * 1000}, + {0, L11, 400 * 1000}, + {0, L12, 300 * 1000}, + {0, L13, 200 * 1000}, + {0, 0, CPUFREQ_TABLE_END}, }; static struct apll_freq *apll_freq_4x12; @@ -102,124 +103,92 @@ static struct apll_freq apll_freq_4412[] = { static void exynos4x12_set_clkdiv(unsigned int div_index) { unsigned int tmp; - unsigned int stat_cpu1; /* Change Divider - CPU0 */ tmp = apll_freq_4x12[div_index].clk_div_cpu0; - __raw_writel(tmp, EXYNOS4_CLKDIV_CPU); + __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU); - while (__raw_readl(EXYNOS4_CLKDIV_STATCPU) & 0x11111111) + while (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU) + & 0x11111111) cpu_relax(); /* Change Divider - CPU1 */ tmp = apll_freq_4x12[div_index].clk_div_cpu1; - __raw_writel(tmp, EXYNOS4_CLKDIV_CPU1); - if (soc_is_exynos4212()) - stat_cpu1 = 0x11; - else - stat_cpu1 = 0x111; + __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU1); - while (__raw_readl(EXYNOS4_CLKDIV_STATCPU1) & stat_cpu1) + do { cpu_relax(); + tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU1); + } while (tmp != 0x0); } static void exynos4x12_set_apll(unsigned int index) { - unsigned int tmp, pdiv; + unsigned int tmp, freq = apll_freq_4x12[index].freq; - /* 1. 
MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */ + /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */ clk_set_parent(moutcore, mout_mpll); do { cpu_relax(); - tmp = (__raw_readl(EXYNOS4_CLKMUX_STATCPU) + tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU) >> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT); tmp &= 0x7; } while (tmp != 0x2); - /* 2. Set APLL Lock time */ - pdiv = ((apll_freq_4x12[index].mps >> 8) & 0x3f); + clk_set_rate(mout_apll, freq * 1000); - __raw_writel((pdiv * 250), EXYNOS4_APLL_LOCK); - - /* 3. Change PLL PMS values */ - tmp = __raw_readl(EXYNOS4_APLL_CON0); - tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0)); - tmp |= apll_freq_4x12[index].mps; - __raw_writel(tmp, EXYNOS4_APLL_CON0); - - /* 4. wait_lock_time */ - do { - cpu_relax(); - tmp = __raw_readl(EXYNOS4_APLL_CON0); - } while (!(tmp & (0x1 << EXYNOS4_APLLCON0_LOCKED_SHIFT))); - - /* 5. MUX_CORE_SEL = APLL */ + /* MUX_CORE_SEL = APLL */ clk_set_parent(moutcore, mout_apll); do { cpu_relax(); - tmp = __raw_readl(EXYNOS4_CLKMUX_STATCPU); + tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU); tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK; } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)); } -static bool exynos4x12_pms_change(unsigned int old_index, unsigned int new_index) -{ - unsigned int old_pm = apll_freq_4x12[old_index].mps >> 8; - unsigned int new_pm = apll_freq_4x12[new_index].mps >> 8; - - return (old_pm == new_pm) ? 0 : 1; -} - static void exynos4x12_set_frequency(unsigned int old_index, unsigned int new_index) { - unsigned int tmp; - if (old_index > new_index) { - if (!exynos4x12_pms_change(old_index, new_index)) { - /* 1. Change the system clock divider values */ - exynos4x12_set_clkdiv(new_index); - /* 2. Change just s value in apll m,p,s value */ - tmp = __raw_readl(EXYNOS4_APLL_CON0); - tmp &= ~(0x7 << 0); - tmp |= apll_freq_4x12[new_index].mps & 0x7; - __raw_writel(tmp, EXYNOS4_APLL_CON0); - - } else { - /* Clock Configuration Procedure */ - /* 1. Change the system clock divider values */ - exynos4x12_set_clkdiv(new_index); - /* 2. Change the apll m,p,s value */ - exynos4x12_set_apll(new_index); - } + exynos4x12_set_clkdiv(new_index); + exynos4x12_set_apll(new_index); } else if (old_index < new_index) { - if (!exynos4x12_pms_change(old_index, new_index)) { - /* 1. Change just s value in apll m,p,s value */ - tmp = __raw_readl(EXYNOS4_APLL_CON0); - tmp &= ~(0x7 << 0); - tmp |= apll_freq_4x12[new_index].mps & 0x7; - __raw_writel(tmp, EXYNOS4_APLL_CON0); - /* 2. Change the system clock divider values */ - exynos4x12_set_clkdiv(new_index); - } else { - /* Clock Configuration Procedure */ - /* 1. Change the apll m,p,s value */ - exynos4x12_set_apll(new_index); - /* 2. Change the system clock divider values */ - exynos4x12_set_clkdiv(new_index); - } + exynos4x12_set_apll(new_index); + exynos4x12_set_clkdiv(new_index); } } int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info) { + struct device_node *np; unsigned long rate; + /* + * HACK: This is a temporary workaround to get access to clock + * controller registers directly and remove static mappings and + * dependencies on platform headers. It is necessary to enable + * Exynos multi-platform support and will be removed together with + * this whole driver as soon as Exynos gets migrated to use + * cpufreq-cpu0 driver. 
+ */ + np = of_find_compatible_node(NULL, NULL, "samsung,exynos4412-clock"); + if (!np) { + pr_err("%s: failed to find clock controller DT node\n", + __func__); + return -ENODEV; + } + + info->cmu_regs = of_iomap(np, 0); + if (!info->cmu_regs) { + pr_err("%s: failed to map CMU registers\n", __func__); + return -EFAULT; + } + cpu_clk = clk_get(NULL, "armclk"); if (IS_ERR(cpu_clk)) return PTR_ERR(cpu_clk); @@ -238,7 +207,7 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info) if (IS_ERR(mout_apll)) goto err_mout_apll; - if (soc_is_exynos4212()) + if (info->type == EXYNOS_SOC_4212) apll_freq_4x12 = apll_freq_4212; else apll_freq_4x12 = apll_freq_4412; @@ -250,7 +219,8 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info) info->volt_table = exynos4x12_volt_table; info->freq_table = exynos4x12_freq_table; info->set_freq = exynos4x12_set_frequency; - info->need_apll_change = exynos4x12_pms_change; + + cpufreq = info; return 0; @@ -264,4 +234,3 @@ err_moutcore: pr_debug("%s: failed initialization\n", __func__); return -EINVAL; } -EXPORT_SYMBOL(exynos4x12_cpufreq_init); diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c index 9fae466d774..c91ce69dc63 100644 --- a/drivers/cpufreq/exynos5250-cpufreq.c +++ b/drivers/cpufreq/exynos5250-cpufreq.c @@ -16,9 +16,8 @@ #include <linux/io.h> #include <linux/slab.h> #include <linux/cpufreq.h> - -#include <mach/map.h> -#include <mach/regs-clock.h> +#include <linux/of.h> +#include <linux/of_address.h> #include "exynos-cpufreq.h" @@ -26,6 +25,7 @@ static struct clk *cpu_clk; static struct clk *moutcore; static struct clk *mout_mpll; static struct clk *mout_apll; +static struct exynos_dvfs_info *cpufreq; static unsigned int exynos5250_volt_table[] = { 1300000, 1250000, 1225000, 1200000, 1150000, @@ -35,23 +35,23 @@ static unsigned int exynos5250_volt_table[] = { }; static struct cpufreq_frequency_table exynos5250_freq_table[] = { - {L0, 1700 * 1000}, - {L1, 1600 * 1000}, - {L2, 1500 * 1000}, - {L3, 1400 * 1000}, - {L4, 1300 * 1000}, - {L5, 1200 * 1000}, - {L6, 1100 * 1000}, - {L7, 1000 * 1000}, - {L8, 900 * 1000}, - {L9, 800 * 1000}, - {L10, 700 * 1000}, - {L11, 600 * 1000}, - {L12, 500 * 1000}, - {L13, 400 * 1000}, - {L14, 300 * 1000}, - {L15, 200 * 1000}, - {0, CPUFREQ_TABLE_END}, + {0, L0, 1700 * 1000}, + {0, L1, 1600 * 1000}, + {0, L2, 1500 * 1000}, + {0, L3, 1400 * 1000}, + {0, L4, 1300 * 1000}, + {0, L5, 1200 * 1000}, + {0, L6, 1100 * 1000}, + {0, L7, 1000 * 1000}, + {0, L8, 900 * 1000}, + {0, L9, 800 * 1000}, + {0, L10, 700 * 1000}, + {0, L11, 600 * 1000}, + {0, L12, 500 * 1000}, + {0, L13, 400 * 1000}, + {0, L14, 300 * 1000}, + {0, L15, 200 * 1000}, + {0, 0, CPUFREQ_TABLE_END}, }; static struct apll_freq apll_freq_5250[] = { @@ -88,115 +88,86 @@ static void set_clkdiv(unsigned int div_index) tmp = apll_freq_5250[div_index].clk_div_cpu0; - __raw_writel(tmp, EXYNOS5_CLKDIV_CPU0); + __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS5_CLKDIV_CPU0); - while (__raw_readl(EXYNOS5_CLKDIV_STATCPU0) & 0x11111111) + while (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKDIV_STATCPU0) + & 0x11111111) cpu_relax(); /* Change Divider - CPU1 */ tmp = apll_freq_5250[div_index].clk_div_cpu1; - __raw_writel(tmp, EXYNOS5_CLKDIV_CPU1); + __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS5_CLKDIV_CPU1); - while (__raw_readl(EXYNOS5_CLKDIV_STATCPU1) & 0x11) + while (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKDIV_STATCPU1) & 0x11) cpu_relax(); } -static void set_apll(unsigned int new_index, - unsigned int old_index) +static void 
set_apll(unsigned int index) { - unsigned int tmp, pdiv; + unsigned int tmp; + unsigned int freq = apll_freq_5250[index].freq; - /* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */ + /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */ clk_set_parent(moutcore, mout_mpll); do { cpu_relax(); - tmp = (__raw_readl(EXYNOS5_CLKMUX_STATCPU) >> 16); + tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKMUX_STATCPU) + >> 16); tmp &= 0x7; } while (tmp != 0x2); - /* 2. Set APLL Lock time */ - pdiv = ((apll_freq_5250[new_index].mps >> 8) & 0x3f); - - __raw_writel((pdiv * 250), EXYNOS5_APLL_LOCK); + clk_set_rate(mout_apll, freq * 1000); - /* 3. Change PLL PMS values */ - tmp = __raw_readl(EXYNOS5_APLL_CON0); - tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0)); - tmp |= apll_freq_5250[new_index].mps; - __raw_writel(tmp, EXYNOS5_APLL_CON0); - - /* 4. wait_lock_time */ - do { - cpu_relax(); - tmp = __raw_readl(EXYNOS5_APLL_CON0); - } while (!(tmp & (0x1 << 29))); - - /* 5. MUX_CORE_SEL = APLL */ + /* MUX_CORE_SEL = APLL */ clk_set_parent(moutcore, mout_apll); do { cpu_relax(); - tmp = __raw_readl(EXYNOS5_CLKMUX_STATCPU); + tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKMUX_STATCPU); tmp &= (0x7 << 16); } while (tmp != (0x1 << 16)); - -} - -static bool exynos5250_pms_change(unsigned int old_index, unsigned int new_index) -{ - unsigned int old_pm = apll_freq_5250[old_index].mps >> 8; - unsigned int new_pm = apll_freq_5250[new_index].mps >> 8; - - return (old_pm == new_pm) ? 0 : 1; } static void exynos5250_set_frequency(unsigned int old_index, unsigned int new_index) { - unsigned int tmp; - if (old_index > new_index) { - if (!exynos5250_pms_change(old_index, new_index)) { - /* 1. Change the system clock divider values */ - set_clkdiv(new_index); - /* 2. Change just s value in apll m,p,s value */ - tmp = __raw_readl(EXYNOS5_APLL_CON0); - tmp &= ~(0x7 << 0); - tmp |= apll_freq_5250[new_index].mps & 0x7; - __raw_writel(tmp, EXYNOS5_APLL_CON0); - - } else { - /* Clock Configuration Procedure */ - /* 1. Change the system clock divider values */ - set_clkdiv(new_index); - /* 2. Change the apll m,p,s value */ - set_apll(new_index, old_index); - } + set_clkdiv(new_index); + set_apll(new_index); } else if (old_index < new_index) { - if (!exynos5250_pms_change(old_index, new_index)) { - /* 1. Change just s value in apll m,p,s value */ - tmp = __raw_readl(EXYNOS5_APLL_CON0); - tmp &= ~(0x7 << 0); - tmp |= apll_freq_5250[new_index].mps & 0x7; - __raw_writel(tmp, EXYNOS5_APLL_CON0); - /* 2. Change the system clock divider values */ - set_clkdiv(new_index); - } else { - /* Clock Configuration Procedure */ - /* 1. Change the apll m,p,s value */ - set_apll(new_index, old_index); - /* 2. Change the system clock divider values */ - set_clkdiv(new_index); - } + set_apll(new_index); + set_clkdiv(new_index); } } int exynos5250_cpufreq_init(struct exynos_dvfs_info *info) { + struct device_node *np; unsigned long rate; + /* + * HACK: This is a temporary workaround to get access to clock + * controller registers directly and remove static mappings and + * dependencies on platform headers. It is necessary to enable + * Exynos multi-platform support and will be removed together with + * this whole driver as soon as Exynos gets migrated to use + * cpufreq-cpu0 driver. 
+ */ + np = of_find_compatible_node(NULL, NULL, "samsung,exynos5250-clock"); + if (!np) { + pr_err("%s: failed to find clock controller DT node\n", + __func__); + return -ENODEV; + } + + info->cmu_regs = of_iomap(np, 0); + if (!info->cmu_regs) { + pr_err("%s: failed to map CMU registers\n", __func__); + return -EFAULT; + } + cpu_clk = clk_get(NULL, "armclk"); if (IS_ERR(cpu_clk)) return PTR_ERR(cpu_clk); @@ -222,7 +193,8 @@ int exynos5250_cpufreq_init(struct exynos_dvfs_info *info) info->volt_table = exynos5250_volt_table; info->freq_table = exynos5250_freq_table; info->set_freq = exynos5250_set_frequency; - info->need_apll_change = exynos5250_pms_change; + + cpufreq = info; return 0; @@ -236,4 +208,3 @@ err_moutcore: pr_err("%s: failed initialization\n", __func__); return -EINVAL; } -EXPORT_SYMBOL(exynos5250_cpufreq_init); diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c index 0c74018eda4..f33f25b483c 100644 --- a/drivers/cpufreq/exynos5440-cpufreq.c +++ b/drivers/cpufreq/exynos5440-cpufreq.c @@ -20,7 +20,7 @@ #include <linux/module.h> #include <linux/of_address.h> #include <linux/of_irq.h> -#include <linux/opp.h> +#include <linux/pm_opp.h> #include <linux/platform_device.h> #include <linux/slab.h> @@ -100,7 +100,6 @@ struct exynos_dvfs_data { struct resource *mem; int irq; struct clk *cpu_clk; - unsigned int cur_frequency; unsigned int latency; struct cpufreq_frequency_table *freq_table; unsigned int freq_count; @@ -115,25 +114,23 @@ static struct cpufreq_freqs freqs; static int init_div_table(void) { - struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table; + struct cpufreq_frequency_table *pos, *freq_tbl = dvfs_info->freq_table; unsigned int tmp, clk_div, ema_div, freq, volt_id; - int i = 0; - struct opp *opp; + struct dev_pm_opp *opp; rcu_read_lock(); - for (i = 0; freq_tbl[i].frequency != CPUFREQ_TABLE_END; i++) { - - opp = opp_find_freq_exact(dvfs_info->dev, - freq_tbl[i].frequency * 1000, true); + cpufreq_for_each_entry(pos, freq_tbl) { + opp = dev_pm_opp_find_freq_exact(dvfs_info->dev, + pos->frequency * 1000, true); if (IS_ERR(opp)) { rcu_read_unlock(); dev_err(dvfs_info->dev, "failed to find valid OPP for %u KHZ\n", - freq_tbl[i].frequency); + pos->frequency); return PTR_ERR(opp); } - freq = freq_tbl[i].frequency / 1000; /* In MHZ */ + freq = pos->frequency / 1000; /* In MHZ */ clk_div = ((freq / CPU_DIV_FREQ_MAX) & P0_7_CPUCLKDEV_MASK) << P0_7_CPUCLKDEV_SHIFT; clk_div |= ((freq / CPU_ATB_FREQ_MAX) & P0_7_ATBCLKDEV_MASK) @@ -142,7 +139,7 @@ static int init_div_table(void) << P0_7_CSCLKDEV_SHIFT; /* Calculate EMA */ - volt_id = opp_get_voltage(opp); + volt_id = dev_pm_opp_get_voltage(opp); volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP; if (volt_id < PMIC_HIGH_VOLT) { ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) | @@ -158,17 +155,19 @@ static int init_div_table(void) tmp = (clk_div | ema_div | (volt_id << P0_7_VDD_SHIFT) | ((freq / FREQ_UNIT) << P0_7_FREQ_SHIFT)); - __raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 * i); + __raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 * + (pos - freq_tbl)); } rcu_read_unlock(); return 0; } -static void exynos_enable_dvfs(void) +static void exynos_enable_dvfs(unsigned int cur_frequency) { - unsigned int tmp, i, cpu; + unsigned int tmp, cpu; struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table; + struct cpufreq_frequency_table *pos; /* Disable DVFS */ __raw_writel(0, dvfs_info->base + XMU_DVFS_CTRL); @@ -183,24 +182,24 @@ static void exynos_enable_dvfs(void) 
__raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN); /* Set initial performance index */ - for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) - if (freq_table[i].frequency == dvfs_info->cur_frequency) + cpufreq_for_each_entry(pos, freq_table) + if (pos->frequency == cur_frequency) break; - if (freq_table[i].frequency == CPUFREQ_TABLE_END) { + if (pos->frequency == CPUFREQ_TABLE_END) { dev_crit(dvfs_info->dev, "Boot up frequency not supported\n"); /* Assign the highest frequency */ - i = 0; - dvfs_info->cur_frequency = freq_table[i].frequency; + pos = freq_table; + cur_frequency = pos->frequency; } dev_info(dvfs_info->dev, "Setting dvfs initial frequency = %uKHZ", - dvfs_info->cur_frequency); + cur_frequency); for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) { tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4); tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT); - tmp |= (i << C0_3_PSTATE_NEW_SHIFT); + tmp |= ((pos - freq_table) << C0_3_PSTATE_NEW_SHIFT); __raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4); } @@ -209,36 +208,18 @@ static void exynos_enable_dvfs(void) dvfs_info->base + XMU_DVFS_CTRL); } -static int exynos_verify_speed(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, - dvfs_info->freq_table); -} - -static unsigned int exynos_getspeed(unsigned int cpu) +static int exynos_target(struct cpufreq_policy *policy, unsigned int index) { - return dvfs_info->cur_frequency; -} - -static int exynos_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int index, tmp; - int ret = 0, i; + unsigned int tmp; + int i; struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table; mutex_lock(&cpufreq_lock); - ret = cpufreq_frequency_table_target(policy, freq_table, - target_freq, relation, &index); - if (ret) - goto out; - - freqs.old = dvfs_info->cur_frequency; + freqs.old = policy->cur; freqs.new = freq_table[index].frequency; - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + cpufreq_freq_transition_begin(policy, &freqs); /* Set the target frequency in all C0_3_PSTATE register */ for_each_cpu(i, policy->cpus) { @@ -248,9 +229,8 @@ static int exynos_target(struct cpufreq_policy *policy, __raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + i * 4); } -out: mutex_unlock(&cpufreq_lock); - return ret; + return 0; } static void exynos_cpufreq_work(struct work_struct *work) @@ -264,7 +244,7 @@ static void exynos_cpufreq_work(struct work_struct *work) goto skip_work; mutex_lock(&cpufreq_lock); - freqs.old = dvfs_info->cur_frequency; + freqs.old = policy->cur; cur_pstate = __raw_readl(dvfs_info->base + XMU_P_STATUS); if (cur_pstate >> C0_3_PSTATE_VALID_SHIFT & 0x1) @@ -274,12 +254,11 @@ static void exynos_cpufreq_work(struct work_struct *work) if (likely(index < dvfs_info->freq_count)) { freqs.new = freq_table[index].frequency; - dvfs_info->cur_frequency = freqs.new; } else { dev_crit(dvfs_info->dev, "New frequency out of range\n"); - freqs.new = dvfs_info->cur_frequency; + freqs.new = freqs.old; } - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + cpufreq_freq_transition_end(policy, &freqs, 0); cpufreq_cpu_put(policy); mutex_unlock(&cpufreq_lock); @@ -321,30 +300,20 @@ static void exynos_sort_descend_freq_table(void) static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) { - int ret; - - ret = cpufreq_frequency_table_cpuinfo(policy, dvfs_info->freq_table); - if (ret) { - dev_err(dvfs_info->dev, "Invalid frequency table: %d\n", ret); - return ret; 
- } - - policy->cur = dvfs_info->cur_frequency; - policy->cpuinfo.transition_latency = dvfs_info->latency; - cpumask_setall(policy->cpus); - - cpufreq_frequency_table_get_attr(dvfs_info->freq_table, policy->cpu); - - return 0; + policy->clk = dvfs_info->cpu_clk; + return cpufreq_generic_init(policy, dvfs_info->freq_table, + dvfs_info->latency); } static struct cpufreq_driver exynos_driver = { - .flags = CPUFREQ_STICKY, - .verify = exynos_verify_speed, - .target = exynos_target, - .get = exynos_getspeed, + .flags = CPUFREQ_STICKY | CPUFREQ_ASYNC_NOTIFICATION | + CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = exynos_target, + .get = cpufreq_generic_get, .init = exynos_cpufreq_cpu_init, .name = CPUFREQ_NAME, + .attr = cpufreq_generic_attr, }; static const struct of_device_id exynos_cpufreq_match[] = { @@ -360,6 +329,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev) int ret = -EINVAL; struct device_node *np; struct resource res; + unsigned int cur_frequency; np = pdev->dev.of_node; if (!np) @@ -396,13 +366,14 @@ static int exynos_cpufreq_probe(struct platform_device *pdev) goto err_put_node; } - ret = opp_init_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); + ret = dev_pm_opp_init_cpufreq_table(dvfs_info->dev, + &dvfs_info->freq_table); if (ret) { dev_err(dvfs_info->dev, "failed to init cpufreq table: %d\n", ret); goto err_put_node; } - dvfs_info->freq_count = opp_get_opp_count(dvfs_info->dev); + dvfs_info->freq_count = dev_pm_opp_get_opp_count(dvfs_info->dev); exynos_sort_descend_freq_table(); if (of_property_read_u32(np, "clock-latency", &dvfs_info->latency)) @@ -415,13 +386,13 @@ static int exynos_cpufreq_probe(struct platform_device *pdev) goto err_free_table; } - dvfs_info->cur_frequency = clk_get_rate(dvfs_info->cpu_clk); - if (!dvfs_info->cur_frequency) { + cur_frequency = clk_get_rate(dvfs_info->cpu_clk); + if (!cur_frequency) { dev_err(dvfs_info->dev, "Failed to get clock rate\n"); ret = -EINVAL; goto err_free_table; } - dvfs_info->cur_frequency /= 1000; + cur_frequency /= 1000; INIT_WORK(&dvfs_info->irq_work, exynos_cpufreq_work); ret = devm_request_irq(dvfs_info->dev, dvfs_info->irq, @@ -438,7 +409,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev) goto err_free_table; } - exynos_enable_dvfs(); + exynos_enable_dvfs(cur_frequency); ret = cpufreq_register_driver(&exynos_driver); if (ret) { dev_err(dvfs_info->dev, @@ -451,17 +422,17 @@ static int exynos_cpufreq_probe(struct platform_device *pdev) return 0; err_free_table: - opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); + dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); err_put_node: of_node_put(np); - dev_err(dvfs_info->dev, "%s: failed initialization\n", __func__); + dev_err(&pdev->dev, "%s: failed initialization\n", __func__); return ret; } static int exynos_cpufreq_remove(struct platform_device *pdev) { cpufreq_unregister_driver(&exynos_driver); - opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); + dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); return 0; } diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index d7a79662e24..1632981c4b2 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c @@ -11,10 +11,8 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> #include <linux/cpufreq.h> +#include <linux/module.h> 
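The freq_table.c hunks below, like the exynos5440 hunks above, replace open-coded index loops with the cpufreq_for_each_entry()/cpufreq_for_each_valid_entry() helpers. A minimal userspace sketch of the pattern, with the entry layout and sentinels reduced to what this patch shows and macro bodies along the lines of include/linux/cpufreq.h of this era:

#include <stdio.h>

#define CPUFREQ_ENTRY_INVALID	~0u
#define CPUFREQ_TABLE_END	~1u

struct cpufreq_frequency_table {
	unsigned int flags;		/* e.g. CPUFREQ_BOOST_FREQ */
	unsigned int driver_data;
	unsigned int frequency;		/* kHz */
};

/* Walk every entry up to the CPUFREQ_TABLE_END sentinel. */
#define cpufreq_for_each_entry(pos, table) \
	for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)

/* Same walk, but transparently skipping CPUFREQ_ENTRY_INVALID rows. */
#define cpufreq_for_each_valid_entry(pos, table) \
	for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++) \
		if (pos->frequency == CPUFREQ_ENTRY_INVALID) \
			continue; \
		else

int main(void)
{
	struct cpufreq_frequency_table table[] = {
		{ 0, 0, 1000000 },
		{ 0, 1, CPUFREQ_ENTRY_INVALID },
		{ 0, 2, 500000 },
		{ 0, 0, CPUFREQ_TABLE_END },
	};
	struct cpufreq_frequency_table *pos;

	/* Prints entries 0 and 2; the invalid row is skipped. */
	cpufreq_for_each_valid_entry(pos, table)
		printf("table entry %d: %u kHz\n", (int)(pos - table),
		       pos->frequency);
	return 0;
}

The (pos - table) pointer arithmetic is how the converted loops recover the numeric index that the old i counter carried, as in the pr_debug() and XMU_PMU_P0_7 hunks.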
/********************************************************************* * FREQUENCY TABLE HELPERS * @@ -23,19 +21,19 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table) { + struct cpufreq_frequency_table *pos; unsigned int min_freq = ~0; unsigned int max_freq = 0; - unsigned int i; + unsigned int freq; - for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { - unsigned int freq = table[i].frequency; - if (freq == CPUFREQ_ENTRY_INVALID) { - pr_debug("table entry %u is invalid, skipping\n", i); + cpufreq_for_each_valid_entry(pos, table) { + freq = pos->frequency; + if (!cpufreq_boost_enabled() + && (pos->flags & CPUFREQ_BOOST_FREQ)) continue; - } - pr_debug("table entry %u: %u kHz, %u index\n", - i, freq, table[i].index); + + pr_debug("table entry %u: %u kHz\n", (int)(pos - table), freq); if (freq < min_freq) min_freq = freq; if (freq > max_freq) @@ -56,31 +54,31 @@ EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo); int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table) { - unsigned int next_larger = ~0; - unsigned int i; - unsigned int count = 0; + struct cpufreq_frequency_table *pos; + unsigned int freq, next_larger = ~0; + bool found = false; pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n", policy->min, policy->max, policy->cpu); - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); + cpufreq_verify_within_cpu_limits(policy); - for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { - unsigned int freq = table[i].frequency; - if (freq == CPUFREQ_ENTRY_INVALID) - continue; - if ((freq >= policy->min) && (freq <= policy->max)) - count++; - else if ((next_larger > freq) && (freq > policy->max)) + cpufreq_for_each_valid_entry(pos, table) { + freq = pos->frequency; + + if ((freq >= policy->min) && (freq <= policy->max)) { + found = true; + break; + } + + if ((next_larger > freq) && (freq > policy->max)) next_larger = freq; } - if (!count) + if (!found) { policy->max = next_larger; - - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); + cpufreq_verify_within_cpu_limits(policy); + } pr_debug("verification lead to (%u - %u kHz) for cpu %u\n", policy->min, policy->max, policy->cpu); @@ -89,6 +87,20 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, } EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify); +/* + * Generic routine to verify policy & frequency table, requires driver to set + * policy->freq_table prior to it. 
+ */ +int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy) +{ + struct cpufreq_frequency_table *table = + cpufreq_frequency_get_table(policy->cpu); + if (!table) + return -ENODEV; + + return cpufreq_frequency_table_verify(policy, table); +} +EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify); int cpufreq_frequency_table_target(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table, @@ -97,14 +109,15 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, unsigned int *index) { struct cpufreq_frequency_table optimal = { - .index = ~0, + .driver_data = ~0, .frequency = 0, }; struct cpufreq_frequency_table suboptimal = { - .index = ~0, + .driver_data = ~0, .frequency = 0, }; - unsigned int i; + struct cpufreq_frequency_table *pos; + unsigned int freq, i = 0; pr_debug("request for target %u kHz (relation: %u) for cpu %u\n", target_freq, relation, policy->cpu); @@ -118,75 +131,106 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, break; } - for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { - unsigned int freq = table[i].frequency; - if (freq == CPUFREQ_ENTRY_INVALID) - continue; + cpufreq_for_each_valid_entry(pos, table) { + freq = pos->frequency; + + i = pos - table; if ((freq < policy->min) || (freq > policy->max)) continue; + if (freq == target_freq) { + optimal.driver_data = i; + break; + } switch (relation) { case CPUFREQ_RELATION_H: - if (freq <= target_freq) { + if (freq < target_freq) { if (freq >= optimal.frequency) { optimal.frequency = freq; - optimal.index = i; + optimal.driver_data = i; } } else { if (freq <= suboptimal.frequency) { suboptimal.frequency = freq; - suboptimal.index = i; + suboptimal.driver_data = i; } } break; case CPUFREQ_RELATION_L: - if (freq >= target_freq) { + if (freq > target_freq) { if (freq <= optimal.frequency) { optimal.frequency = freq; - optimal.index = i; + optimal.driver_data = i; } } else { if (freq >= suboptimal.frequency) { suboptimal.frequency = freq; - suboptimal.index = i; + suboptimal.driver_data = i; } } break; } } - if (optimal.index > i) { - if (suboptimal.index > i) + if (optimal.driver_data > i) { + if (suboptimal.driver_data > i) return -EINVAL; - *index = suboptimal.index; + *index = suboptimal.driver_data; } else - *index = optimal.index; + *index = optimal.driver_data; - pr_debug("target is %u (%u kHz, %u)\n", *index, table[*index].frequency, - table[*index].index); + pr_debug("target index is %u, freq is:%u kHz\n", *index, + table[*index].frequency); return 0; } EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target); -static DEFINE_PER_CPU(struct cpufreq_frequency_table *, cpufreq_show_table); +int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy, + unsigned int freq) +{ + struct cpufreq_frequency_table *pos, *table; + + table = cpufreq_frequency_get_table(policy->cpu); + if (unlikely(!table)) { + pr_debug("%s: Unable to find frequency table\n", __func__); + return -ENOENT; + } + + cpufreq_for_each_valid_entry(pos, table) + if (pos->frequency == freq) + return pos - table; + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_index); + /** * show_available_freqs - show available frequencies for the specified CPU */ -static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf) +static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf, + bool show_boost) { - unsigned int i = 0; - unsigned int cpu = policy->cpu; ssize_t count = 0; - struct cpufreq_frequency_table *table; + struct 
cpufreq_frequency_table *pos, *table = policy->freq_table; - if (!per_cpu(cpufreq_show_table, cpu)) + if (!table) return -ENODEV; - table = per_cpu(cpufreq_show_table, cpu); - - for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { - if (table[i].frequency == CPUFREQ_ENTRY_INVALID) + cpufreq_for_each_valid_entry(pos, table) { + /* + * show_boost = true and the entry's CPUFREQ_BOOST_FREQ flag set: + * display the BOOST frequency + * + * show_boost = false and the flag set, or + * show_boost = true and the flag clear: + * continue - do not display anything + * + * show_boost = false and the flag clear: + * display the NON BOOST frequency + */ + if (show_boost ^ (pos->flags & CPUFREQ_BOOST_FREQ)) continue; - count += sprintf(&buf[count], "%d ", table[i].frequency); + + count += sprintf(&buf[count], "%d ", pos->frequency); } count += sprintf(&buf[count], "\n"); @@ -194,45 +238,61 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf) } -struct freq_attr cpufreq_freq_attr_scaling_available_freqs = { - .attr = { .name = "scaling_available_frequencies", - .mode = 0444, - }, - .show = show_available_freqs, -}; -EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs); +#define cpufreq_attr_available_freq(_name) \ +struct freq_attr cpufreq_freq_attr_##_name##_freqs = \ +__ATTR_RO(_name##_frequencies) -/* - * if you use these, you must assure that the frequency table is valid - * all the time between get_attr and put_attr! +/** + * scaling_available_frequencies_show - show available normal frequencies for + * the specified CPU */ -void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, - unsigned int cpu) +static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy, + char *buf) { - pr_debug("setting show_table for cpu %u to %p\n", cpu, table); - per_cpu(cpufreq_show_table, cpu) = table; + return show_available_freqs(policy, buf, false); } -EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr); +cpufreq_attr_available_freq(scaling_available); +EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs); -void cpufreq_frequency_table_put_attr(unsigned int cpu) +/** + * scaling_boost_frequencies_show - show available boost frequencies for + * the specified CPU + */ +static ssize_t scaling_boost_frequencies_show(struct cpufreq_policy *policy, + char *buf) { - pr_debug("clearing show_table for cpu %u\n", cpu); - per_cpu(cpufreq_show_table, cpu) = NULL; + return show_available_freqs(policy, buf, true); } -EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr); +cpufreq_attr_available_freq(scaling_boost); +EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_boost_freqs); + +struct freq_attr *cpufreq_generic_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, +#ifdef CONFIG_CPU_FREQ_BOOST_SW + &cpufreq_freq_attr_scaling_boost_freqs, +#endif + NULL, +}; +EXPORT_SYMBOL_GPL(cpufreq_generic_attr); -void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy) +int cpufreq_table_validate_and_show(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table) { - pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n", - policy->cpu, policy->last_cpu); - per_cpu(cpufreq_show_table, policy->cpu) = per_cpu(cpufreq_show_table, - policy->last_cpu); - per_cpu(cpufreq_show_table, policy->last_cpu) = NULL; + int ret = cpufreq_frequency_table_cpuinfo(policy, table); + + if (!ret) + policy->freq_table = table; + + return ret; } +EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show); + +struct cpufreq_policy
*cpufreq_cpu_get_raw(unsigned int cpu); struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu) { - return per_cpu(cpufreq_show_table, cpu); + struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); + return policy ? policy->freq_table : NULL; } EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table); diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c index 3dfc99b9ca8..1d723dc8880 100644 --- a/drivers/cpufreq/gx-suspmod.c +++ b/drivers/cpufreq/gx-suspmod.c @@ -183,7 +183,7 @@ static void gx_write_byte(int reg, int value) * gx_detect_chipset: * **/ -static __init struct pci_dev *gx_detect_chipset(void) +static struct pci_dev * __init gx_detect_chipset(void) { struct pci_dev *gx_pci = NULL; @@ -265,7 +265,7 @@ static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz) freqs.new = new_khz; - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + cpufreq_freq_transition_begin(policy, &freqs); local_irq_save(flags); if (new_khz != stock_freq) { @@ -314,7 +314,7 @@ static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz) gx_params->pci_suscfg = suscfg; - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + cpufreq_freq_transition_end(policy, &freqs, 0); pr_debug("suspend modulation w/ duration of ON:%d us, OFF:%d us\n", gx_params->on_duration * 32, gx_params->off_duration * 32); @@ -401,7 +401,7 @@ static int cpufreq_gx_target(struct cpufreq_policy *policy, static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) { - unsigned int maxfreq, curfreq; + unsigned int maxfreq; if (!policy || policy->cpu != 0) return -ENODEV; @@ -415,10 +415,8 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f]; stock_freq = maxfreq; - curfreq = gx_get_cpuspeed(0); pr_debug("cpu max frequency is %d.\n", maxfreq); - pr_debug("cpu current frequency is %dkHz.\n", curfreq); /* setup basic struct for cpufreq API */ policy->cpu = 0; @@ -428,7 +426,6 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) else policy->min = maxfreq / POLICY_MIN_DIV; policy->max = maxfreq; - policy->cur = curfreq; policy->cpuinfo.min_freq = maxfreq / max_duration; policy->cpuinfo.max_freq = maxfreq; policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; @@ -446,7 +443,6 @@ static struct cpufreq_driver gx_suspmod_driver = { .target = cpufreq_gx_target, .init = cpufreq_gx_cpu_init, .name = "gx-suspmod", - .owner = THIS_MODULE, }; static int __init cpufreq_gx_init(void) @@ -466,7 +462,7 @@ static int __init cpufreq_gx_init(void) pr_debug("geode suspend modulation available.\n"); - params = kzalloc(sizeof(struct gxfreq_params), GFP_KERNEL); + params = kzalloc(sizeof(*params), GFP_KERNEL); if (params == NULL) return -ENOMEM; diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c index b61b5a3fad6..bf8902a0866 100644 --- a/drivers/cpufreq/highbank-cpufreq.c +++ b/drivers/cpufreq/highbank-cpufreq.c @@ -66,26 +66,21 @@ static int hb_cpufreq_driver_init(void) struct device_node *np; int ret; - if (!of_machine_is_compatible("calxeda,highbank")) + if ((!of_machine_is_compatible("calxeda,highbank")) && + (!of_machine_is_compatible("calxeda,ecx-2000"))) return -ENODEV; - for_each_child_of_node(of_find_node_by_path("/cpus"), np) - if (of_get_property(np, "operating-points", NULL)) - break; - - if (!np) { - pr_err("failed to find highbank cpufreq node\n"); - return -ENOENT; - } - cpu_dev = get_cpu_device(0); if (!cpu_dev) { pr_err("failed to get 
highbank cpufreq device\n"); - ret = -ENODEV; - goto out_put_node; + return -ENODEV; } - cpu_dev->of_node = np; + np = of_node_get(cpu_dev->of_node); + if (!np) { + pr_err("failed to find highbank cpufreq node\n"); + return -ENOENT; + } cpu_clk = clk_get(cpu_dev, NULL); if (IS_ERR(cpu_clk)) { diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c index c0075dbaa63..c30aaa6a54e 100644 --- a/drivers/cpufreq/ia64-acpi-cpufreq.c +++ b/drivers/cpufreq/ia64-acpi-cpufreq.c @@ -141,7 +141,6 @@ processor_set_freq ( { int ret = 0; u32 value = 0; - struct cpufreq_freqs cpufreq_freqs; cpumask_t saved_mask; int retval; @@ -168,13 +167,6 @@ processor_set_freq ( pr_debug("Transitioning from P%d to P%d\n", data->acpi_data.state, state); - /* cpufreq frequency struct */ - cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency; - cpufreq_freqs.new = data->freq_table[state].frequency; - - /* notify cpufreq */ - cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_PRECHANGE); - /* * First we write the target state's 'control' value to the * control_register. @@ -186,22 +178,11 @@ processor_set_freq ( ret = processor_set_pstate(value); if (ret) { - unsigned int tmp = cpufreq_freqs.new; - cpufreq_notify_transition(policy, &cpufreq_freqs, - CPUFREQ_POSTCHANGE); - cpufreq_freqs.new = cpufreq_freqs.old; - cpufreq_freqs.old = tmp; - cpufreq_notify_transition(policy, &cpufreq_freqs, - CPUFREQ_PRECHANGE); - cpufreq_notify_transition(policy, &cpufreq_freqs, - CPUFREQ_POSTCHANGE); printk(KERN_WARNING "Transition failed with error %d\n", ret); retval = -ENODEV; goto migrate_end; } - cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_POSTCHANGE); - data->acpi_data.state = state; retval = 0; @@ -227,42 +208,11 @@ acpi_cpufreq_get ( static int acpi_cpufreq_target ( struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; - unsigned int next_state = 0; - unsigned int result = 0; - - pr_debug("acpi_cpufreq_setpolicy\n"); - - result = cpufreq_frequency_table_target(policy, - data->freq_table, target_freq, relation, &next_state); - if (result) - return (result); - - result = processor_set_freq(data, policy, next_state); - - return (result); -} - - -static int -acpi_cpufreq_verify ( - struct cpufreq_policy *policy) + unsigned int index) { - unsigned int result = 0; - struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; - - pr_debug("acpi_cpufreq_verify\n"); - - result = cpufreq_frequency_table_verify(policy, - data->freq_table); - - return (result); + return processor_set_freq(acpi_io_data[policy->cpu], policy, index); } - static int acpi_cpufreq_cpu_init ( struct cpufreq_policy *policy) @@ -274,7 +224,7 @@ acpi_cpufreq_cpu_init ( pr_debug("acpi_cpufreq_cpu_init\n"); - data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL); + data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return (-ENOMEM); @@ -304,7 +254,7 @@ acpi_cpufreq_cpu_init ( } /* alloc freq_table */ - data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * + data->freq_table = kzalloc(sizeof(*data->freq_table) * (data->acpi_data.state_count + 1), GFP_KERNEL); if (!data->freq_table) { @@ -321,12 +271,10 @@ acpi_cpufreq_cpu_init ( data->acpi_data.states[i].transition_latency * 1000; } } - policy->cur = processor_get_freq(data, policy->cpu); /* table init */ for (i = 0; i <= data->acpi_data.state_count; i++) { - data->freq_table[i].index = i; if (i < data->acpi_data.state_count) { 
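/* ACPI P-states report core_frequency in MHz; the cpufreq table stores kHz, hence the multiplication by 1000 below. */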
data->freq_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000; @@ -335,7 +283,7 @@ acpi_cpufreq_cpu_init ( } } - result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); + result = cpufreq_table_validate_and_show(policy, data->freq_table); if (result) { goto err_freqfree; } @@ -356,8 +304,6 @@ acpi_cpufreq_cpu_init ( (u32) data->acpi_data.states[i].status, (u32) data->acpi_data.states[i].control); - cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu); - /* the first call to ->target() should result in us actually * writing something to the appropriate registers. */ data->resume = 1; @@ -385,7 +331,6 @@ acpi_cpufreq_cpu_exit ( pr_debug("acpi_cpufreq_cpu_exit\n"); if (data) { - cpufreq_frequency_table_put_attr(policy->cpu); acpi_io_data[policy->cpu] = NULL; acpi_processor_unregister_performance(&data->acpi_data, policy->cpu); @@ -396,21 +341,14 @@ acpi_cpufreq_cpu_exit ( } -static struct freq_attr* acpi_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - - static struct cpufreq_driver acpi_cpufreq_driver = { - .verify = acpi_cpufreq_verify, - .target = acpi_cpufreq_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = acpi_cpufreq_target, .get = acpi_cpufreq_get, .init = acpi_cpufreq_cpu_init, .exit = acpi_cpufreq_cpu_exit, .name = "acpi-cpufreq", - .owner = THIS_MODULE, - .attr = acpi_cpufreq_attr, + .attr = cpufreq_generic_attr, }; diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index b78bc35973b..af366c21d4b 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c @@ -7,12 +7,12 @@ */ #include <linux/clk.h> +#include <linux/cpu.h> #include <linux/cpufreq.h> -#include <linux/delay.h> #include <linux/err.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/opp.h> +#include <linux/pm_opp.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> @@ -34,77 +34,54 @@ static struct device *cpu_dev; static struct cpufreq_frequency_table *freq_table; static unsigned int transition_latency; -static int imx6q_verify_speed(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, freq_table); -} - -static unsigned int imx6q_get_speed(unsigned int cpu) -{ - return clk_get_rate(arm_clk) / 1000; -} +static u32 *imx6_soc_volt; +static u32 soc_opp_count; -static int imx6q_set_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) +static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index) { - struct cpufreq_freqs freqs; - struct opp *opp; + struct dev_pm_opp *opp; unsigned long freq_hz, volt, volt_old; - unsigned int index; + unsigned int old_freq, new_freq; int ret; - ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, - relation, &index); - if (ret) { - dev_err(cpu_dev, "failed to match target frequency %d: %d\n", - target_freq, ret); - return ret; - } - - freqs.new = freq_table[index].frequency; - freq_hz = freqs.new * 1000; - freqs.old = clk_get_rate(arm_clk) / 1000; - - if (freqs.old == freqs.new) - return 0; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + new_freq = freq_table[index].frequency; + freq_hz = new_freq * 1000; + old_freq = clk_get_rate(arm_clk) / 1000; rcu_read_lock(); - opp = opp_find_freq_ceil(cpu_dev, &freq_hz); + opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz); if (IS_ERR(opp)) { rcu_read_unlock(); dev_err(cpu_dev, "failed to find OPP for %ld\n", freq_hz); return PTR_ERR(opp); } - volt = 
opp_get_voltage(opp); + volt = dev_pm_opp_get_voltage(opp); rcu_read_unlock(); volt_old = regulator_get_voltage(arm_reg); dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n", - freqs.old / 1000, volt_old / 1000, - freqs.new / 1000, volt / 1000); + old_freq / 1000, volt_old / 1000, + new_freq / 1000, volt / 1000); /* scaling up? scale voltage before frequency */ - if (freqs.new > freqs.old) { + if (new_freq > old_freq) { + ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0); + if (ret) { + dev_err(cpu_dev, "failed to scale vddpu up: %d\n", ret); + return ret; + } + ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0); + if (ret) { + dev_err(cpu_dev, "failed to scale vddsoc up: %d\n", ret); + return ret; + } ret = regulator_set_voltage_tol(arm_reg, volt, 0); if (ret) { dev_err(cpu_dev, "failed to scale vddarm up: %d\n", ret); return ret; } - - /* - * Need to increase vddpu and vddsoc for safety - * if we are about to run at 1.2 GHz. - */ - if (freqs.new == FREQ_1P2_GHZ / 1000) { - regulator_set_voltage_tol(pu_reg, - PU_SOC_VOLTAGE_HIGH, 0); - regulator_set_voltage_tol(soc_reg, - PU_SOC_VOLTAGE_HIGH, 0); - } } /* @@ -116,32 +93,15 @@ static int imx6q_set_target(struct cpufreq_policy *policy, * - Reprogram pll1_sys_clk and reparent pll1_sw_clk back to it * - Disable pll2_pfd2_396m_clk */ - clk_prepare_enable(pll2_pfd2_396m_clk); clk_set_parent(step_clk, pll2_pfd2_396m_clk); clk_set_parent(pll1_sw_clk, step_clk); if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) { - clk_set_rate(pll1_sys_clk, freqs.new * 1000); - /* - * If we are leaving 396 MHz set-point, we need to enable - * pll1_sys_clk and disable pll2_pfd2_396m_clk to keep - * their use count correct. - */ - if (freqs.old * 1000 <= clk_get_rate(pll2_pfd2_396m_clk)) { - clk_prepare_enable(pll1_sys_clk); - clk_disable_unprepare(pll2_pfd2_396m_clk); - } + clk_set_rate(pll1_sys_clk, new_freq * 1000); clk_set_parent(pll1_sw_clk, pll1_sys_clk); - clk_disable_unprepare(pll2_pfd2_396m_clk); - } else { - /* - * Disable pll1_sys_clk if pll2_pfd2_396m_clk is sufficient - * to provide the frequency. - */ - clk_disable_unprepare(pll1_sys_clk); } /* Ensure the arm clock divider is what we expect */ - ret = clk_set_rate(arm_clk, freqs.new * 1000); + ret = clk_set_rate(arm_clk, new_freq * 1000); if (ret) { dev_err(cpu_dev, "failed to set clock rate: %d\n", ret); regulator_set_voltage_tol(arm_reg, volt_old, 0); @@ -149,148 +109,186 @@ static int imx6q_set_target(struct cpufreq_policy *policy, } /* scaling down? 
scale voltage after frequency */ - if (freqs.new < freqs.old) { + if (new_freq < old_freq) { ret = regulator_set_voltage_tol(arm_reg, volt, 0); - if (ret) + if (ret) { dev_warn(cpu_dev, "failed to scale vddarm down: %d\n", ret); - - if (freqs.old == FREQ_1P2_GHZ / 1000) { - regulator_set_voltage_tol(pu_reg, - PU_SOC_VOLTAGE_NORMAL, 0); - regulator_set_voltage_tol(soc_reg, - PU_SOC_VOLTAGE_NORMAL, 0); + ret = 0; + } + ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0); + if (ret) { + dev_warn(cpu_dev, "failed to scale vddsoc down: %d\n", ret); + ret = 0; + } + ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0); + if (ret) { + dev_warn(cpu_dev, "failed to scale vddpu down: %d\n", ret); + ret = 0; } } - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - return 0; } static int imx6q_cpufreq_init(struct cpufreq_policy *policy) { - int ret; - - ret = cpufreq_frequency_table_cpuinfo(policy, freq_table); - if (ret) { - dev_err(cpu_dev, "invalid frequency table: %d\n", ret); - return ret; - } - - policy->cpuinfo.transition_latency = transition_latency; - policy->cur = clk_get_rate(arm_clk) / 1000; - cpumask_setall(policy->cpus); - cpufreq_frequency_table_get_attr(freq_table, policy->cpu); - - return 0; + policy->clk = arm_clk; + return cpufreq_generic_init(policy, freq_table, transition_latency); } -static int imx6q_cpufreq_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; -} - -static struct freq_attr *imx6q_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver imx6q_cpufreq_driver = { - .verify = imx6q_verify_speed, - .target = imx6q_set_target, - .get = imx6q_get_speed, + .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = imx6q_set_target, + .get = cpufreq_generic_get, .init = imx6q_cpufreq_init, - .exit = imx6q_cpufreq_exit, .name = "imx6q-cpufreq", - .attr = imx6q_cpufreq_attr, + .attr = cpufreq_generic_attr, }; static int imx6q_cpufreq_probe(struct platform_device *pdev) { struct device_node *np; - struct opp *opp; + struct dev_pm_opp *opp; unsigned long min_volt, max_volt; int num, ret; + const struct property *prop; + const __be32 *val; + u32 nr, i, j; + + cpu_dev = get_cpu_device(0); + if (!cpu_dev) { + pr_err("failed to get cpu0 device\n"); + return -ENODEV; + } - cpu_dev = &pdev->dev; - - np = of_find_node_by_path("/cpus/cpu@0"); + np = of_node_get(cpu_dev->of_node); if (!np) { dev_err(cpu_dev, "failed to find cpu0 node\n"); return -ENOENT; } - cpu_dev->of_node = np; - - arm_clk = devm_clk_get(cpu_dev, "arm"); - pll1_sys_clk = devm_clk_get(cpu_dev, "pll1_sys"); - pll1_sw_clk = devm_clk_get(cpu_dev, "pll1_sw"); - step_clk = devm_clk_get(cpu_dev, "step"); - pll2_pfd2_396m_clk = devm_clk_get(cpu_dev, "pll2_pfd2_396m"); + arm_clk = clk_get(cpu_dev, "arm"); + pll1_sys_clk = clk_get(cpu_dev, "pll1_sys"); + pll1_sw_clk = clk_get(cpu_dev, "pll1_sw"); + step_clk = clk_get(cpu_dev, "step"); + pll2_pfd2_396m_clk = clk_get(cpu_dev, "pll2_pfd2_396m"); if (IS_ERR(arm_clk) || IS_ERR(pll1_sys_clk) || IS_ERR(pll1_sw_clk) || IS_ERR(step_clk) || IS_ERR(pll2_pfd2_396m_clk)) { dev_err(cpu_dev, "failed to get clocks\n"); ret = -ENOENT; - goto put_node; + goto put_clk; } - arm_reg = devm_regulator_get(cpu_dev, "arm"); - pu_reg = devm_regulator_get(cpu_dev, "pu"); - soc_reg = devm_regulator_get(cpu_dev, "soc"); + arm_reg = regulator_get(cpu_dev, "arm"); + pu_reg = regulator_get(cpu_dev, "pu"); + soc_reg = 
regulator_get(cpu_dev, "soc"); if (IS_ERR(arm_reg) || IS_ERR(pu_reg) || IS_ERR(soc_reg)) { dev_err(cpu_dev, "failed to get regulators\n"); ret = -ENOENT; - goto put_node; + goto put_reg; } - /* We expect an OPP table supplied by platform */ - num = opp_get_opp_count(cpu_dev); + /* + * We expect an OPP table supplied by the platform. + * Just in case the platform did not supply the OPP + * table, try to get it from the device tree. + */ + num = dev_pm_opp_get_opp_count(cpu_dev); if (num < 0) { - ret = num; - dev_err(cpu_dev, "no OPP table is found: %d\n", ret); - goto put_node; + ret = of_init_opp_table(cpu_dev); + if (ret < 0) { + dev_err(cpu_dev, "failed to init OPP table: %d\n", ret); + goto put_reg; + } + + num = dev_pm_opp_get_opp_count(cpu_dev); + if (num < 0) { + ret = num; + dev_err(cpu_dev, "no OPP table is found: %d\n", ret); + goto put_reg; + } } - ret = opp_init_cpufreq_table(cpu_dev, &freq_table); + ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); if (ret) { dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); - goto put_node; + goto put_reg; + } + + /* Make imx6_soc_volt array's size same as arm opp number */ + imx6_soc_volt = devm_kzalloc(cpu_dev, sizeof(*imx6_soc_volt) * num, GFP_KERNEL); + if (imx6_soc_volt == NULL) { + ret = -ENOMEM; + goto free_freq_table; + } + + prop = of_find_property(np, "fsl,soc-operating-points", NULL); + if (!prop || !prop->value) + goto soc_opp_out; + + /* + * Each OPP is a set of tuples consisting of frequency and + * voltage like <freq-kHz vol-uV>. + */ + nr = prop->length / sizeof(u32); + if (nr % 2 || (nr / 2) < num) + goto soc_opp_out; + + for (j = 0; j < num; j++) { + val = prop->value; + for (i = 0; i < nr / 2; i++) { + unsigned long freq = be32_to_cpup(val++); + unsigned long volt = be32_to_cpup(val++); + if (freq_table[j].frequency == freq) { + imx6_soc_volt[soc_opp_count++] = volt; + break; + } + } + } + +soc_opp_out: + /* use fixed soc opp volt if no valid soc opp info found in dtb */ + if (soc_opp_count != num) { + dev_warn(cpu_dev, "cannot find valid fsl,soc-operating-points property in dtb, using default values!\n"); + for (j = 0; j < num; j++) + imx6_soc_volt[j] = PU_SOC_VOLTAGE_NORMAL; + if (freq_table[num - 1].frequency * 1000 == FREQ_1P2_GHZ) + imx6_soc_volt[num - 1] = PU_SOC_VOLTAGE_HIGH; } if (of_property_read_u32(np, "clock-latency", &transition_latency)) transition_latency = CPUFREQ_ETERNAL; /* + * Calculate the ramp time for max voltage change in the + * VDDSOC and VDDPU regulators. + */ + ret = regulator_set_voltage_time(soc_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]); + if (ret > 0) + transition_latency += ret * 1000; + ret = regulator_set_voltage_time(pu_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]); + if (ret > 0) + transition_latency += ret * 1000; + + /* * OPP is maintained in order of increasing frequency, and * freq_table initialised from OPP is therefore sorted in the * same order.
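* (That ascending order is relied on just below: the minimum voltage is read from freq_table[0] and the maximum from the last entry, freq_table[--num].)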
*/ rcu_read_lock(); - opp = opp_find_freq_exact(cpu_dev, + opp = dev_pm_opp_find_freq_exact(cpu_dev, freq_table[0].frequency * 1000, true); - min_volt = opp_get_voltage(opp); - opp = opp_find_freq_exact(cpu_dev, + min_volt = dev_pm_opp_get_voltage(opp); + opp = dev_pm_opp_find_freq_exact(cpu_dev, freq_table[--num].frequency * 1000, true); - max_volt = opp_get_voltage(opp); + max_volt = dev_pm_opp_get_voltage(opp); rcu_read_unlock(); ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt); if (ret > 0) transition_latency += ret * 1000; - /* Count vddpu and vddsoc latency in for 1.2 GHz support */ - if (freq_table[num].frequency == FREQ_1P2_GHZ / 1000) { - ret = regulator_set_voltage_time(pu_reg, PU_SOC_VOLTAGE_NORMAL, - PU_SOC_VOLTAGE_HIGH); - if (ret > 0) - transition_latency += ret * 1000; - ret = regulator_set_voltage_time(soc_reg, PU_SOC_VOLTAGE_NORMAL, - PU_SOC_VOLTAGE_HIGH); - if (ret > 0) - transition_latency += ret * 1000; - } - ret = cpufreq_register_driver(&imx6q_cpufreq_driver); if (ret) { dev_err(cpu_dev, "failed register driver: %d\n", ret); @@ -301,8 +299,25 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev) return 0; free_freq_table: - opp_free_cpufreq_table(cpu_dev, &freq_table); -put_node: + dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); +put_reg: + if (!IS_ERR(arm_reg)) + regulator_put(arm_reg); + if (!IS_ERR(pu_reg)) + regulator_put(pu_reg); + if (!IS_ERR(soc_reg)) + regulator_put(soc_reg); +put_clk: + if (!IS_ERR(arm_clk)) + clk_put(arm_clk); + if (!IS_ERR(pll1_sys_clk)) + clk_put(pll1_sys_clk); + if (!IS_ERR(pll1_sw_clk)) + clk_put(pll1_sw_clk); + if (!IS_ERR(step_clk)) + clk_put(step_clk); + if (!IS_ERR(pll2_pfd2_396m_clk)) + clk_put(pll2_pfd2_396m_clk); of_node_put(np); return ret; } @@ -310,7 +325,15 @@ put_node: static int imx6q_cpufreq_remove(struct platform_device *pdev) { cpufreq_unregister_driver(&imx6q_cpufreq_driver); - opp_free_cpufreq_table(cpu_dev, &freq_table); + dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); + regulator_put(arm_reg); + regulator_put(pu_reg); + regulator_put(soc_reg); + clk_put(arm_clk); + clk_put(pll1_sys_clk); + clk_put(pll1_sw_clk); + clk_put(step_clk); + clk_put(pll2_pfd2_396m_clk); return 0; } diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c index f7c99df0880..e5122f1bfe7 100644 --- a/drivers/cpufreq/integrator-cpufreq.c +++ b/drivers/cpufreq/integrator-cpufreq.c @@ -15,18 +15,19 @@ #include <linux/smp.h> #include <linux/init.h> #include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/of_address.h> -#include <mach/hardware.h> -#include <mach/platform.h> #include <asm/mach-types.h> #include <asm/hardware/icst.h> -static struct cpufreq_driver integrator_driver; +static void __iomem *cm_base; +/* The cpufreq driver only use the OSC register */ +#define INTEGRATOR_HDR_OSC_OFFSET 0x08 +#define INTEGRATOR_HDR_LOCK_OFFSET 0x14 -#define CM_ID __io_address(INTEGRATOR_HDR_ID) -#define CM_OSC __io_address(INTEGRATOR_HDR_OSC) -#define CM_STAT __io_address(INTEGRATOR_HDR_STAT) -#define CM_LOCK __io_address(INTEGRATOR_HDR_LOCK) +static struct cpufreq_driver integrator_driver; static const struct icst_params lclk_params = { .ref = 24000000, @@ -59,9 +60,7 @@ static int integrator_verify_policy(struct cpufreq_policy *policy) { struct icst_vco vco; - cpufreq_verify_within_limits(policy, - policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); + cpufreq_verify_within_cpu_limits(policy); vco = icst_hz_to_vco(&cclk_params, policy->max * 1000); 
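/* icst_hz_to_vco()/icst_hz() round trip: snap the requested max (and, below, min) to the nearest rate the ICST clock synthesizer can actually generate. */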
policy->max = icst_hz(&cclk_params, vco) / 1000; @@ -69,10 +68,7 @@ static int integrator_verify_policy(struct cpufreq_policy *policy) vco = icst_hz_to_vco(&cclk_params, policy->min * 1000); policy->min = icst_hz(&cclk_params, vco) / 1000; - cpufreq_verify_within_limits(policy, - policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); - + cpufreq_verify_within_cpu_limits(policy); return 0; } @@ -100,7 +96,7 @@ static int integrator_set_target(struct cpufreq_policy *policy, BUG_ON(cpu != smp_processor_id()); /* get current setting */ - cm_osc = __raw_readl(CM_OSC); + cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET); if (machine_is_integrator()) { vco.s = (cm_osc >> 8) & 7; @@ -126,9 +122,9 @@ static int integrator_set_target(struct cpufreq_policy *policy, return 0; } - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + cpufreq_freq_transition_begin(policy, &freqs); - cm_osc = __raw_readl(CM_OSC); + cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET); if (machine_is_integrator()) { cm_osc &= 0xfffff800; @@ -138,16 +134,16 @@ static int integrator_set_target(struct cpufreq_policy *policy, } cm_osc |= vco.v; - __raw_writel(0xa05f, CM_LOCK); - __raw_writel(cm_osc, CM_OSC); - __raw_writel(0, CM_LOCK); + __raw_writel(0xa05f, cm_base + INTEGRATOR_HDR_LOCK_OFFSET); + __raw_writel(cm_osc, cm_base + INTEGRATOR_HDR_OSC_OFFSET); + __raw_writel(0, cm_base + INTEGRATOR_HDR_LOCK_OFFSET); /* * Restore the CPUs allowed mask. */ set_cpus_allowed(current, cpus_allowed); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + cpufreq_freq_transition_end(policy, &freqs, 0); return 0; } @@ -165,7 +161,7 @@ static unsigned int integrator_get(unsigned int cpu) BUG_ON(cpu != smp_processor_id()); /* detect memory etc. */ - cm_osc = __raw_readl(CM_OSC); + cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET); if (machine_is_integrator()) { vco.s = (cm_osc >> 8) & 7; @@ -186,15 +182,15 @@ static int integrator_cpufreq_init(struct cpufreq_policy *policy) { /* set default policy and cpuinfo */ - policy->cpuinfo.max_freq = 160000; - policy->cpuinfo.min_freq = 12000; + policy->max = policy->cpuinfo.max_freq = 160000; + policy->min = policy->cpuinfo.min_freq = 12000; policy->cpuinfo.transition_latency = 1000000; /* 1 ms, assumed */ - policy->cur = policy->min = policy->max = integrator_get(policy->cpu); return 0; } static struct cpufreq_driver integrator_driver = { + .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK, .verify = integrator_verify_policy, .target = integrator_set_target, .get = integrator_get, @@ -202,19 +198,43 @@ static struct cpufreq_driver integrator_driver = { .name = "integrator", }; -static int __init integrator_cpu_init(void) +static int __init integrator_cpufreq_probe(struct platform_device *pdev) { + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + cm_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (!cm_base) + return -ENODEV; + return cpufreq_register_driver(&integrator_driver); } -static void __exit integrator_cpu_exit(void) +static int __exit integrator_cpufreq_remove(struct platform_device *pdev) { - cpufreq_unregister_driver(&integrator_driver); + return cpufreq_unregister_driver(&integrator_driver); } +static const struct of_device_id integrator_cpufreq_match[] = { + { .compatible = "arm,core-module-integrator"}, + { }, +}; + +static struct platform_driver integrator_cpufreq_driver = { + .driver = { + .name = "integrator-cpufreq", + .owner = THIS_MODULE, + .of_match_table = integrator_cpufreq_match, + }, + .remove =
__exit_p(integrator_cpufreq_remove), +}; + +module_platform_driver_probe(integrator_cpufreq_driver, + integrator_cpufreq_probe); + MODULE_AUTHOR ("Russell M. King"); MODULE_DESCRIPTION ("cpufreq driver for ARM Integrator CPUs"); MODULE_LICENSE ("GPL"); - -module_init(integrator_cpu_init); -module_exit(integrator_cpu_exit); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 07f2840ad80..86631cb6f7d 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -25,18 +25,24 @@ #include <linux/types.h> #include <linux/fs.h> #include <linux/debugfs.h> +#include <linux/acpi.h> #include <trace/events/power.h> #include <asm/div64.h> #include <asm/msr.h> #include <asm/cpu_device_id.h> -#define SAMPLE_COUNT 3 +#define BYT_RATIOS 0x66a +#define BYT_VIDS 0x66b +#define BYT_TURBO_RATIOS 0x66c +#define BYT_TURBO_VIDS 0x66d + #define FRAC_BITS 8 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) #define fp_toint(X) ((X) >> FRAC_BITS) + static inline int32_t mul_fp(int32_t x, int32_t y) { return ((int64_t)x * (int64_t)y) >> FRAC_BITS; @@ -48,10 +54,11 @@ static inline int32_t div_fp(int32_t x, int32_t y) } struct sample { - int core_pct_busy; + int32_t core_pct_busy; u64 aperf; u64 mperf; int freq; + ktime_t time; }; struct pstate_data { @@ -61,6 +68,13 @@ struct pstate_data { int turbo_pstate; }; +struct vid_data { + int min; + int max; + int turbo; + int32_t ratio; +}; + struct _pid { int setpoint; int32_t integral; @@ -68,26 +82,22 @@ struct _pid { int32_t i_gain; int32_t d_gain; int deadband; - int last_err; + int32_t last_err; }; struct cpudata { int cpu; - char name[64]; - struct timer_list timer; - struct pstate_adjust_policy *pstate_policy; struct pstate_data pstate; + struct vid_data vid; struct _pid pid; - int min_pstate_count; - + ktime_t last_sample_time; u64 prev_aperf; u64 prev_mperf; - int sample_ptr; - struct sample samples[SAMPLE_COUNT]; + struct sample sample; }; static struct cpudata **all_cpu_data; @@ -100,17 +110,25 @@ struct pstate_adjust_policy { int i_gain_pct; }; -static struct pstate_adjust_policy default_policy = { - .sample_rate_ms = 10, - .deadband = 0, - .setpoint = 109, - .p_gain_pct = 17, - .d_gain_pct = 0, - .i_gain_pct = 4, +struct pstate_funcs { + int (*get_max)(void); + int (*get_min)(void); + int (*get_turbo)(void); + void (*set)(struct cpudata*, int pstate); + void (*get_vid)(struct cpudata *); +}; + +struct cpu_defaults { + struct pstate_adjust_policy pid_policy; + struct pstate_funcs funcs; }; +static struct pstate_adjust_policy pid_params; +static struct pstate_funcs pstate_funcs; + struct perf_limits { int no_turbo; + int turbo_disabled; int max_perf_pct; int min_perf_pct; int32_t max_perf; @@ -134,7 +152,7 @@ static inline void pid_reset(struct _pid *pid, int setpoint, int busy, pid->setpoint = setpoint; pid->deadband = deadband; pid->integral = int_tofp(integral); - pid->last_err = setpoint - busy; + pid->last_err = int_tofp(setpoint) - int_tofp(busy); } static inline void pid_p_gain_set(struct _pid *pid, int percent) @@ -153,16 +171,15 @@ static inline void pid_d_gain_set(struct _pid *pid, int percent) pid->d_gain = div_fp(int_tofp(percent), int_tofp(100)); } -static signed int pid_calc(struct _pid *pid, int busy) +static signed int pid_calc(struct _pid *pid, int32_t busy) { - signed int err, result; + signed int result; int32_t pterm, dterm, fp_error; int32_t integral_limit; - err = pid->setpoint - busy; - fp_error = int_tofp(err); + fp_error = int_tofp(pid->setpoint) - busy; - if (abs(err) <= pid->deadband) 
+ if (abs(fp_error) <= int_tofp(pid->deadband)) return 0; pterm = mul_fp(pid->p_gain, fp_error); @@ -176,24 +193,24 @@ static signed int pid_calc(struct _pid *pid, int busy) if (pid->integral < -integral_limit) pid->integral = -integral_limit; - dterm = mul_fp(pid->d_gain, (err - pid->last_err)); - pid->last_err = err; + dterm = mul_fp(pid->d_gain, fp_error - pid->last_err); + pid->last_err = fp_error; result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm; - + result = result + (1 << (FRAC_BITS-1)); return (signed int)fp_toint(result); } static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu) { - pid_p_gain_set(&cpu->pid, cpu->pstate_policy->p_gain_pct); - pid_d_gain_set(&cpu->pid, cpu->pstate_policy->d_gain_pct); - pid_i_gain_set(&cpu->pid, cpu->pstate_policy->i_gain_pct); + pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct); + pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct); + pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct); pid_reset(&cpu->pid, - cpu->pstate_policy->setpoint, + pid_params.setpoint, 100, - cpu->pstate_policy->deadband, + pid_params.deadband, 0); } @@ -227,12 +244,12 @@ struct pid_param { }; static struct pid_param pid_files[] = { - {"sample_rate_ms", &default_policy.sample_rate_ms}, - {"d_gain_pct", &default_policy.d_gain_pct}, - {"i_gain_pct", &default_policy.i_gain_pct}, - {"deadband", &default_policy.deadband}, - {"setpoint", &default_policy.setpoint}, - {"p_gain_pct", &default_policy.p_gain_pct}, + {"sample_rate_ms", &pid_params.sample_rate_ms}, + {"d_gain_pct", &pid_params.d_gain_pct}, + {"i_gain_pct", &pid_params.i_gain_pct}, + {"deadband", &pid_params.deadband}, + {"setpoint", &pid_params.setpoint}, + {"p_gain_pct", &pid_params.p_gain_pct}, {NULL, NULL} }; @@ -271,7 +288,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, if (ret != 1) return -EINVAL; limits.no_turbo = clamp_t(int, input, 0 , 1); - + if (limits.turbo_disabled) { + pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); + limits.no_turbo = limits.turbo_disabled; + } return count; } @@ -337,42 +357,153 @@ static void intel_pstate_sysfs_expose_params(void) } /************************** sysfs end ************************/ +static int byt_get_min_pstate(void) +{ + u64 value; + rdmsrl(BYT_RATIOS, value); + return (value >> 8) & 0x7F; +} + +static int byt_get_max_pstate(void) +{ + u64 value; + rdmsrl(BYT_RATIOS, value); + return (value >> 16) & 0x7F; +} + +static int byt_get_turbo_pstate(void) +{ + u64 value; + rdmsrl(BYT_TURBO_RATIOS, value); + return value & 0x7F; +} + +static void byt_set_pstate(struct cpudata *cpudata, int pstate) +{ + u64 val; + int32_t vid_fp; + u32 vid; + + val = pstate << 8; + if (limits.no_turbo && !limits.turbo_disabled) + val |= (u64)1 << 32; + + vid_fp = cpudata->vid.min + mul_fp( + int_tofp(pstate - cpudata->pstate.min_pstate), + cpudata->vid.ratio); + + vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max); + vid = fp_toint(vid_fp); + + if (pstate > cpudata->pstate.max_pstate) + vid = cpudata->vid.turbo; -static int intel_pstate_min_pstate(void) + val |= vid; + + wrmsrl(MSR_IA32_PERF_CTL, val); +} + +static void byt_get_vid(struct cpudata *cpudata) +{ + u64 value; + + + rdmsrl(BYT_VIDS, value); + cpudata->vid.min = int_tofp((value >> 8) & 0x7f); + cpudata->vid.max = int_tofp((value >> 16) & 0x7f); + cpudata->vid.ratio = div_fp( + cpudata->vid.max - cpudata->vid.min, + int_tofp(cpudata->pstate.max_pstate - + cpudata->pstate.min_pstate)); + + rdmsrl(BYT_TURBO_VIDS, value); + cpudata->vid.turbo = value & 0x7f; 
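/*
 * The Baytrail VID code above works in FRAC_BITS = 8 fixed point:
 * vid.ratio holds (vid.max - vid.min) / (max_pstate - min_pstate), and
 * byt_set_pstate() linearly interpolates a VID for the requested P-state,
 * clamped to [vid.min, vid.max]. Illustrative sketch of that arithmetic,
 * using the file's int_tofp()/fp_toint()/mul_fp()/div_fp() helpers and
 * hypothetical register values (not read from real MSRs, not part of
 * the patch):
 */
static u32 example_byt_vid(int pstate)
{
	int min_pstate = 5, max_pstate = 16;	/* hypothetical */
	int32_t vid_min = int_tofp(0x20);	/* hypothetical */
	int32_t vid_max = int_tofp(0x48);	/* hypothetical */
	int32_t ratio = div_fp(vid_max - vid_min,
			       int_tofp(max_pstate - min_pstate));
	int32_t vid_fp = vid_min +
			 mul_fp(int_tofp(pstate - min_pstate), ratio);

	vid_fp = clamp_t(int32_t, vid_fp, vid_min, vid_max);
	return fp_toint(vid_fp);
}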
+} + + +static int core_get_min_pstate(void) { u64 value; rdmsrl(MSR_PLATFORM_INFO, value); return (value >> 40) & 0xFF; } -static int intel_pstate_max_pstate(void) +static int core_get_max_pstate(void) { u64 value; rdmsrl(MSR_PLATFORM_INFO, value); return (value >> 8) & 0xFF; } -static int intel_pstate_turbo_pstate(void) +static int core_get_turbo_pstate(void) { u64 value; int nont, ret; rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value); - nont = intel_pstate_max_pstate(); + nont = core_get_max_pstate(); ret = ((value) & 255); if (ret <= nont) ret = nont; return ret; } +static void core_set_pstate(struct cpudata *cpudata, int pstate) +{ + u64 val; + + val = pstate << 8; + if (limits.no_turbo && !limits.turbo_disabled) + val |= (u64)1 << 32; + + wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val); +} + +static struct cpu_defaults core_params = { + .pid_policy = { + .sample_rate_ms = 10, + .deadband = 0, + .setpoint = 97, + .p_gain_pct = 20, + .d_gain_pct = 0, + .i_gain_pct = 0, + }, + .funcs = { + .get_max = core_get_max_pstate, + .get_min = core_get_min_pstate, + .get_turbo = core_get_turbo_pstate, + .set = core_set_pstate, + }, +}; + +static struct cpu_defaults byt_params = { + .pid_policy = { + .sample_rate_ms = 10, + .deadband = 0, + .setpoint = 97, + .p_gain_pct = 14, + .d_gain_pct = 0, + .i_gain_pct = 4, + }, + .funcs = { + .get_max = byt_get_max_pstate, + .get_min = byt_get_min_pstate, + .get_turbo = byt_get_turbo_pstate, + .set = byt_set_pstate, + .get_vid = byt_get_vid, + }, +}; + + static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) { int max_perf = cpu->pstate.turbo_pstate; + int max_perf_adj; int min_perf; if (limits.no_turbo) max_perf = cpu->pstate.max_pstate; - max_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf)); - *max = clamp_t(int, max_perf, + max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf)); + *max = clamp_t(int, max_perf_adj, cpu->pstate.min_pstate, cpu->pstate.turbo_pstate); min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf)); @@ -394,8 +525,8 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) trace_cpu_frequency(pstate * 100000, cpu->cpu); cpu->pstate.current_pstate = pstate; - wrmsrl(MSR_IA32_PERF_CTL, pstate << 8); + pstate_funcs.set(cpu, pstate); } static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps) @@ -415,27 +546,31 @@ static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps) static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) { - sprintf(cpu->name, "Intel 2nd generation core"); - - cpu->pstate.min_pstate = intel_pstate_min_pstate(); - cpu->pstate.max_pstate = intel_pstate_max_pstate(); - cpu->pstate.turbo_pstate = intel_pstate_turbo_pstate(); + cpu->pstate.min_pstate = pstate_funcs.get_min(); + cpu->pstate.max_pstate = pstate_funcs.get_max(); + cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); - /* - * goto max pstate so we don't slow up boot if we are built-in if we are - * a module we will take care of it during normal operation - */ - intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); + if (pstate_funcs.get_vid) + pstate_funcs.get_vid(cpu); + intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); } -static inline void intel_pstate_calc_busy(struct cpudata *cpu, - struct sample *sample) +static inline void intel_pstate_calc_busy(struct cpudata *cpu) { - u64 core_pct; - core_pct = div64_u64(sample->aperf * 100, sample->mperf); - sample->freq = cpu->pstate.max_pstate * core_pct * 1000; + struct sample *sample = 
&cpu->sample; + int64_t core_pct; + int32_t rem; + + core_pct = int_tofp(sample->aperf) * int_tofp(100); + core_pct = div_u64_rem(core_pct, int_tofp(sample->mperf), &rem); + + if ((rem << 1) >= int_tofp(sample->mperf)) + core_pct += 1; - sample->core_pct_busy = core_pct; + sample->freq = fp_toint( + mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct)); + + sample->core_pct_busy = (int32_t)core_pct; } static inline void intel_pstate_sample(struct cpudata *cpu) @@ -444,13 +579,18 @@ static inline void intel_pstate_sample(struct cpudata *cpu) rdmsrl(MSR_IA32_APERF, aperf); rdmsrl(MSR_IA32_MPERF, mperf); - cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT; - cpu->samples[cpu->sample_ptr].aperf = aperf; - cpu->samples[cpu->sample_ptr].mperf = mperf; - cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf; - cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf; - intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]); + aperf = aperf >> FRAC_BITS; + mperf = mperf >> FRAC_BITS; + + cpu->last_sample_time = cpu->sample.time; + cpu->sample.time = ktime_get(); + cpu->sample.aperf = aperf; + cpu->sample.mperf = mperf; + cpu->sample.aperf -= cpu->prev_aperf; + cpu->sample.mperf -= cpu->prev_mperf; + + intel_pstate_calc_busy(cpu); cpu->prev_aperf = aperf; cpu->prev_mperf = mperf; @@ -460,27 +600,37 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu) { int sample_time, delay; - sample_time = cpu->pstate_policy->sample_rate_ms; + sample_time = pid_params.sample_rate_ms; delay = msecs_to_jiffies(sample_time); mod_timer_pinned(&cpu->timer, jiffies + delay); } -static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu) +static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu) { - int32_t busy_scaled; - int32_t core_busy, turbo_pstate, current_pstate; + int32_t core_busy, max_pstate, current_pstate, sample_ratio; + u32 duration_us; + u32 sample_time; - core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy); - turbo_pstate = int_tofp(cpu->pstate.turbo_pstate); + core_busy = cpu->sample.core_pct_busy; + max_pstate = int_tofp(cpu->pstate.max_pstate); current_pstate = int_tofp(cpu->pstate.current_pstate); - busy_scaled = mul_fp(core_busy, div_fp(turbo_pstate, current_pstate)); + core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate)); + + sample_time = (pid_params.sample_rate_ms * USEC_PER_MSEC); + duration_us = (u32) ktime_us_delta(cpu->sample.time, + cpu->last_sample_time); + if (duration_us > sample_time * 3) { + sample_ratio = div_fp(int_tofp(sample_time), + int_tofp(duration_us)); + core_busy = mul_fp(core_busy, sample_ratio); + } - return fp_toint(busy_scaled); + return core_busy; } static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) { - int busy_scaled; + int32_t busy_scaled; struct _pid *pid; signed int ctl = 0; int steps; @@ -491,6 +641,7 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) ctl = pid_calc(pid, busy_scaled); steps = abs(ctl); + if (ctl < 0) intel_pstate_pstate_increase(cpu, steps); else @@ -500,53 +651,58 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) static void intel_pstate_timer_func(unsigned long __data) { struct cpudata *cpu = (struct cpudata *) __data; + struct sample *sample; intel_pstate_sample(cpu); + + sample = &cpu->sample; + intel_pstate_adjust_busy_pstate(cpu); - if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) { - cpu->min_pstate_count++; - if (!(cpu->min_pstate_count % 5)) { - intel_pstate_set_pstate(cpu, 
cpu->pstate.max_pstate); - } - } else - cpu->min_pstate_count = 0; + trace_pstate_sample(fp_toint(sample->core_pct_busy), + fp_toint(intel_pstate_get_scaled_busy(cpu)), + cpu->pstate.current_pstate, + sample->mperf, + sample->aperf, + sample->freq); intel_pstate_set_sample_time(cpu); } #define ICPU(model, policy) \ - { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy } + { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\ + (unsigned long)&policy } static const struct x86_cpu_id intel_pstate_cpu_ids[] = { - ICPU(0x2a, default_policy), - ICPU(0x2d, default_policy), - ICPU(0x3a, default_policy), + ICPU(0x2a, core_params), + ICPU(0x2d, core_params), + ICPU(0x37, byt_params), + ICPU(0x3a, core_params), + ICPU(0x3c, core_params), + ICPU(0x3d, core_params), + ICPU(0x3e, core_params), + ICPU(0x3f, core_params), + ICPU(0x45, core_params), + ICPU(0x46, core_params), + ICPU(0x4f, core_params), + ICPU(0x56, core_params), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); static int intel_pstate_init_cpu(unsigned int cpunum) { - - const struct x86_cpu_id *id; struct cpudata *cpu; - id = x86_match_cpu(intel_pstate_cpu_ids); - if (!id) - return -ENODEV; - all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL); if (!all_cpu_data[cpunum]) return -ENOMEM; cpu = all_cpu_data[cpunum]; + cpu->cpu = cpunum; intel_pstate_get_cpu_pstates(cpu); - cpu->cpu = cpunum; - cpu->pstate_policy = - (struct pstate_adjust_policy *)id->driver_data; init_timer_deferrable(&cpu->timer); cpu->timer.function = intel_pstate_timer_func; cpu->timer.data = @@ -554,7 +710,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum) cpu->timer.expires = jiffies + HZ/100; intel_pstate_busy_pid_reset(cpu); intel_pstate_sample(cpu); - intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); add_timer_on(&cpu->timer, cpunum); @@ -571,7 +726,7 @@ static unsigned int intel_pstate_get(unsigned int cpu_num) cpu = all_cpu_data[cpu_num]; if (!cpu) return 0; - sample = &cpu->samples[cpu->sample_ptr]; + sample = &cpu->sample; return sample->freq; } @@ -589,7 +744,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) limits.min_perf = int_tofp(1); limits.max_perf_pct = 100; limits.max_perf = int_tofp(1); - limits.no_turbo = 0; + limits.no_turbo = limits.turbo_disabled; return 0; } limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; @@ -606,9 +761,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) static int intel_pstate_verify_policy(struct cpufreq_policy *policy) { - cpufreq_verify_within_limits(policy, - policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); + cpufreq_verify_within_cpu_limits(policy); if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) && (policy->policy != CPUFREQ_POLICY_PERFORMANCE)) @@ -617,20 +770,24 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy) return 0; } -static int __cpuinit intel_pstate_cpu_exit(struct cpufreq_policy *policy) +static void intel_pstate_stop_cpu(struct cpufreq_policy *policy) { - int cpu = policy->cpu; + int cpu_num = policy->cpu; + struct cpudata *cpu = all_cpu_data[cpu_num]; - del_timer(&all_cpu_data[cpu]->timer); - kfree(all_cpu_data[cpu]); - all_cpu_data[cpu] = NULL; - return 0; + pr_info("intel_pstate CPU %d exiting\n", cpu_num); + + del_timer_sync(&all_cpu_data[cpu_num]->timer); + intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); + kfree(all_cpu_data[cpu_num]); + all_cpu_data[cpu_num] = NULL; } -static int __cpuinit intel_pstate_cpu_init(struct cpufreq_policy *policy) +static int 
intel_pstate_cpu_init(struct cpufreq_policy *policy) { - int rc, min_pstate, max_pstate; struct cpudata *cpu; + int rc; + u64 misc_en; rc = intel_pstate_init_cpu(policy->cpu); if (rc) @@ -638,15 +795,19 @@ static int __cpuinit intel_pstate_cpu_init(struct cpufreq_policy *policy) cpu = all_cpu_data[policy->cpu]; - if (!limits.no_turbo && - limits.min_perf_pct == 100 && limits.max_perf_pct == 100) + rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); + if (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || + cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) { + limits.turbo_disabled = 1; + limits.no_turbo = 1; + } + if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100) policy->policy = CPUFREQ_POLICY_PERFORMANCE; else policy->policy = CPUFREQ_POLICY_POWERSAVE; - intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate); - policy->min = min_pstate * 100000; - policy->max = max_pstate * 100000; + policy->min = cpu->pstate.min_pstate * 100000; + policy->max = cpu->pstate.turbo_pstate * 100000; /* cpuinfo and default policy values */ policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000; @@ -663,9 +824,8 @@ static struct cpufreq_driver intel_pstate_driver = { .setpolicy = intel_pstate_set_policy, .get = intel_pstate_get, .init = intel_pstate_cpu_init, - .exit = intel_pstate_cpu_exit, + .stop_cpu = intel_pstate_stop_cpu, .name = "intel_pstate", - .owner = THIS_MODULE, }; static int __initdata no_load; @@ -678,9 +838,9 @@ static int intel_pstate_msrs_not_valid(void) rdmsrl(MSR_IA32_APERF, aperf); rdmsrl(MSR_IA32_MPERF, mperf); - if (!intel_pstate_min_pstate() || - !intel_pstate_max_pstate() || - !intel_pstate_turbo_pstate()) + if (!pstate_funcs.get_max() || + !pstate_funcs.get_min() || + !pstate_funcs.get_turbo()) return -ENODEV; rdmsrl(MSR_IA32_APERF, tmp); @@ -693,10 +853,97 @@ static int intel_pstate_msrs_not_valid(void) return 0; } + +static void copy_pid_params(struct pstate_adjust_policy *policy) +{ + pid_params.sample_rate_ms = policy->sample_rate_ms; + pid_params.p_gain_pct = policy->p_gain_pct; + pid_params.i_gain_pct = policy->i_gain_pct; + pid_params.d_gain_pct = policy->d_gain_pct; + pid_params.deadband = policy->deadband; + pid_params.setpoint = policy->setpoint; +} + +static void copy_cpu_funcs(struct pstate_funcs *funcs) +{ + pstate_funcs.get_max = funcs->get_max; + pstate_funcs.get_min = funcs->get_min; + pstate_funcs.get_turbo = funcs->get_turbo; + pstate_funcs.set = funcs->set; + pstate_funcs.get_vid = funcs->get_vid; +} + +#if IS_ENABLED(CONFIG_ACPI) +#include <acpi/processor.h> + +static bool intel_pstate_no_acpi_pss(void) +{ + int i; + + for_each_possible_cpu(i) { + acpi_status status; + union acpi_object *pss; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_processor *pr = per_cpu(processors, i); + + if (!pr) + continue; + + status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); + if (ACPI_FAILURE(status)) + continue; + + pss = buffer.pointer; + if (pss && pss->type == ACPI_TYPE_PACKAGE) { + kfree(pss); + return false; + } + + kfree(pss); + } + + return true; +} + +struct hw_vendor_info { + u16 valid; + char oem_id[ACPI_OEM_ID_SIZE]; + char oem_table_id[ACPI_OEM_TABLE_ID_SIZE]; +}; + +/* Hardware vendor-specific info that has its own power management modes */ +static struct hw_vendor_info vendor_info[] = { + {1, "HP ", "ProLiant"}, + {0, "", ""}, +}; + +static bool intel_pstate_platform_pwr_mgmt_exists(void) +{ + struct acpi_table_header hdr; + struct hw_vendor_info *v_info; + + if (acpi_disabled + || 
ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr))) + return false; + + for (v_info = vendor_info; v_info->valid; v_info++) { + if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) + && !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) + && intel_pstate_no_acpi_pss()) + return true; + } + + return false; +} +#else /* CONFIG_ACPI not enabled */ +static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; } +#endif /* CONFIG_ACPI */ + static int __init intel_pstate_init(void) { int cpu, rc = 0; const struct x86_cpu_id *id; + struct cpu_defaults *cpu_info; if (no_load) return -ENODEV; @@ -705,6 +952,18 @@ static int __init intel_pstate_init(void) if (!id) return -ENODEV; + /* + * The Intel pstate driver will be ignored if the platform + * firmware has its own power management modes. + */ + if (intel_pstate_platform_pwr_mgmt_exists()) + return -ENODEV; + + cpu_info = (struct cpu_defaults *)id->driver_data; + + copy_pid_params(&cpu_info->pid_policy); + copy_cpu_funcs(&cpu_info->funcs); + if (intel_pstate_msrs_not_valid()) return -ENODEV; @@ -720,6 +979,7 @@ static int __init intel_pstate_init(void) intel_pstate_debug_expose_params(); intel_pstate_sysfs_expose_params(); + return rc; out: get_online_cpus(); diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c index b2644af985e..37a480680cd 100644 --- a/drivers/cpufreq/kirkwood-cpufreq.c +++ b/drivers/cpufreq/kirkwood-cpufreq.c @@ -14,7 +14,7 @@ #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/cpufreq.h> -#include <linux/of.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/io.h> #include <asm/proc-fns.h> @@ -43,9 +43,9 @@ static struct priv * table. */ static struct cpufreq_frequency_table kirkwood_freq_table[] = { - {STATE_CPU_FREQ, 0}, /* CPU uses cpuclk */ - {STATE_DDR_FREQ, 0}, /* CPU uses ddrclk */ - {0, CPUFREQ_TABLE_END}, + {0, STATE_CPU_FREQ, 0}, /* CPU uses cpuclk */ + {0, STATE_DDR_FREQ, 0}, /* CPU uses ddrclk */ + {0, 0, CPUFREQ_TABLE_END}, }; static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu) @@ -55,69 +55,37 @@ static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu) return kirkwood_freq_table[0].frequency; } -static void kirkwood_cpufreq_set_cpu_state(struct cpufreq_policy *policy, - unsigned int index) +static int kirkwood_cpufreq_target(struct cpufreq_policy *policy, + unsigned int index) { - struct cpufreq_freqs freqs; - unsigned int state = kirkwood_freq_table[index].index; + unsigned int state = kirkwood_freq_table[index].driver_data; unsigned long reg; - freqs.old = kirkwood_cpufreq_get_cpu_frequency(0); - freqs.new = kirkwood_freq_table[index].frequency; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - - dev_dbg(priv.dev, "Attempting to set frequency to %i KHz\n", - kirkwood_freq_table[index].frequency); - dev_dbg(priv.dev, "old frequency was %i KHz\n", - kirkwood_cpufreq_get_cpu_frequency(0)); - - if (freqs.old != freqs.new) { - local_irq_disable(); - - /* Disable interrupts to the CPU */ - reg = readl_relaxed(priv.base); - reg |= CPU_SW_INT_BLK; - writel_relaxed(reg, priv.base); - - switch (state) { - case STATE_CPU_FREQ: - clk_disable(priv.powersave_clk); - break; - case STATE_DDR_FREQ: - clk_enable(priv.powersave_clk); - break; - } - - /* Wait-for-Interrupt, while the hardware changes frequency */ - cpu_do_idle(); + local_irq_disable(); - /* Enable interrupts to the CPU */ - reg = readl_relaxed(priv.base); - reg &= 
~CPU_SW_INT_BLK; - writel_relaxed(reg, priv.base); + /* Disable interrupts to the CPU */ + reg = readl_relaxed(priv.base); + reg |= CPU_SW_INT_BLK; + writel_relaxed(reg, priv.base); - local_irq_enable(); + switch (state) { + case STATE_CPU_FREQ: + clk_disable(priv.powersave_clk); + break; + case STATE_DDR_FREQ: + clk_enable(priv.powersave_clk); + break; } - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); -}; - -static int kirkwood_cpufreq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, kirkwood_freq_table); -} -static int kirkwood_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int index = 0; + /* Wait-for-Interrupt, while the hardware changes frequency */ + cpu_do_idle(); - if (cpufreq_frequency_table_target(policy, kirkwood_freq_table, - target_freq, relation, &index)) - return -EINVAL; + /* Enable interrupts to the CPU */ + reg = readl_relaxed(priv.base); + reg &= ~CPU_SW_INT_BLK; + writel_relaxed(reg, priv.base); - kirkwood_cpufreq_set_cpu_state(policy, index); + local_irq_enable(); return 0; } @@ -125,41 +93,17 @@ static int kirkwood_cpufreq_target(struct cpufreq_policy *policy, /* Module init and exit code */ static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy) { - int result; - - /* cpuinfo and default policy values */ - policy->cpuinfo.transition_latency = 5000; /* 5uS */ - policy->cur = kirkwood_cpufreq_get_cpu_frequency(0); - - result = cpufreq_frequency_table_cpuinfo(policy, kirkwood_freq_table); - if (result) - return result; - - cpufreq_frequency_table_get_attr(kirkwood_freq_table, policy->cpu); - - return 0; -} - -static int kirkwood_cpufreq_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; + return cpufreq_generic_init(policy, kirkwood_freq_table, 5000); } -static struct freq_attr *kirkwood_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver kirkwood_cpufreq_driver = { + .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK, .get = kirkwood_cpufreq_get_cpu_frequency, - .verify = kirkwood_cpufreq_verify, - .target = kirkwood_cpufreq_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = kirkwood_cpufreq_target, .init = kirkwood_cpufreq_cpu_init, - .exit = kirkwood_cpufreq_cpu_exit, .name = "kirkwood-cpufreq", - .owner = THIS_MODULE, - .attr = kirkwood_cpufreq_attr, + .attr = cpufreq_generic_attr, }; static int kirkwood_cpufreq_probe(struct platform_device *pdev) @@ -175,9 +119,11 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev) if (IS_ERR(priv.base)) return PTR_ERR(priv.base); - np = of_find_node_by_path("/cpus/cpu@0"); - if (!np) + np = of_cpu_device_node_get(0); + if (!np) { + dev_err(&pdev->dev, "failed to get cpu device node\n"); return -ENODEV; + } priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk"); if (IS_ERR(priv.cpu_clk)) { diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index b448638e34d..c913906a719 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c @@ -242,7 +242,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index, * Sets a new clock ratio. 
*/ -static void longhaul_setstate(struct cpufreq_policy *policy, +static int longhaul_setstate(struct cpufreq_policy *policy, unsigned int table_index) { unsigned int mults_index; @@ -254,14 +254,16 @@ static void longhaul_setstate(struct cpufreq_policy *policy, u32 bm_timeout = 1000; unsigned int dir = 0; - mults_index = longhaul_table[table_index].index; + mults_index = longhaul_table[table_index].driver_data; /* Safety precautions */ mult = mults[mults_index & 0x1f]; if (mult == -1) - return; + return -EINVAL; + speed = calc_speed(mult); if ((speed > highest_speed) || (speed < lowest_speed)) - return; + return -EINVAL; + /* Voltage transition before frequency transition? */ if (can_scale_voltage && longhaul_index < table_index) dir = 1; @@ -269,8 +271,6 @@ static void longhaul_setstate(struct cpufreq_policy *policy, freqs.old = calc_speed(longhaul_get_cpu_mult()); freqs.new = speed; - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", fsb, mult/10, mult%10, print_speed(speed/1000)); retry_loop: @@ -385,12 +385,14 @@ retry_loop: goto retry_loop; } } - /* Report true CPU frequency */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - if (!bm_timeout) + if (!bm_timeout) { printk(KERN_INFO PFX "Warning: Timeout while waiting for " "idle PCI bus.\n"); + return -EBUSY; + } + + return 0; } /* @@ -422,7 +424,7 @@ static int guess_fsb(int mult) } -static int __cpuinit longhaul_get_ranges(void) +static int longhaul_get_ranges(void) { unsigned int i, j, k = 0; unsigned int ratio; @@ -475,7 +477,7 @@ static int __cpuinit longhaul_get_ranges(void) return -EINVAL; } - longhaul_table = kmalloc((numscales + 1) * sizeof(*longhaul_table), + longhaul_table = kzalloc((numscales + 1) * sizeof(*longhaul_table), GFP_KERNEL); if (!longhaul_table) return -ENOMEM; @@ -487,7 +489,7 @@ static int __cpuinit longhaul_get_ranges(void) if (ratio > maxmult || ratio < minmult) continue; longhaul_table[k].frequency = calc_speed(ratio); - longhaul_table[k].index = j; + longhaul_table[k].driver_data = j; k++; } if (k <= 1) { @@ -508,8 +510,8 @@ static int __cpuinit longhaul_get_ranges(void) if (min_i != j) { swap(longhaul_table[j].frequency, longhaul_table[min_i].frequency); - swap(longhaul_table[j].index, - longhaul_table[min_i].index); + swap(longhaul_table[j].driver_data, + longhaul_table[min_i].driver_data); } } @@ -517,7 +519,7 @@ static int __cpuinit longhaul_get_ranges(void) /* Find index we are running on */ for (j = 0; j < k; j++) { - if (mults[longhaul_table[j].index & 0x1f] == mult) { + if (mults[longhaul_table[j].driver_data & 0x1f] == mult) { longhaul_index = j; break; } @@ -526,8 +528,9 @@ static int __cpuinit longhaul_get_ranges(void) } -static void __cpuinit longhaul_setup_voltagescaling(void) +static void longhaul_setup_voltagescaling(void) { + struct cpufreq_frequency_table *freq_pos; union msr_longhaul longhaul; struct mV_pos minvid, maxvid, vid; unsigned int j, speed, pos, kHz_step, numvscales; @@ -606,18 +609,16 @@ static void __cpuinit longhaul_setup_voltagescaling(void) /* Calculate kHz for one voltage step */ kHz_step = (highest_speed - min_vid_speed) / numvscales; - j = 0; - while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) { - speed = longhaul_table[j].frequency; + cpufreq_for_each_entry(freq_pos, longhaul_table) { + speed = freq_pos->frequency; if (speed > min_vid_speed) pos = (speed - min_vid_speed) / kHz_step + minvid.pos; else pos = minvid.pos; - longhaul_table[j].index |= mV_vrm_table[pos] << 8; + 
freq_pos->driver_data |= mV_vrm_table[pos] << 8; vid = vrm_mV_table[mV_vrm_table[pos]]; printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n", - speed, j, vid.mV); - j++; + speed, (int)(freq_pos - longhaul_table), vid.mV); } can_scale_voltage = 1; @@ -625,30 +626,16 @@ static void __cpuinit longhaul_setup_voltagescaling(void) } -static int longhaul_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, longhaul_table); -} - - static int longhaul_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) + unsigned int table_index) { - unsigned int table_index = 0; unsigned int i; unsigned int dir = 0; u8 vid, current_vid; - - if (cpufreq_frequency_table_target(policy, longhaul_table, target_freq, - relation, &table_index)) - return -EINVAL; - - /* Don't set same frequency again */ - if (longhaul_index == table_index) - return 0; + int retval = 0; if (!can_scale_voltage) - longhaul_setstate(policy, table_index); + retval = longhaul_setstate(policy, table_index); else { /* On test system voltage transitions exceeding single * step up or down were turning motherboard off. Both @@ -656,14 +643,14 @@ static int longhaul_target(struct cpufreq_policy *policy, * this in hardware, C3 is old and we need to do this * in software. */ i = longhaul_index; - current_vid = (longhaul_table[longhaul_index].index >> 8); + current_vid = (longhaul_table[longhaul_index].driver_data >> 8); current_vid &= 0x1f; if (table_index > longhaul_index) dir = 1; while (i != table_index) { - vid = (longhaul_table[i].index >> 8) & 0x1f; + vid = (longhaul_table[i].driver_data >> 8) & 0x1f; if (vid != current_vid) { - longhaul_setstate(policy, i); + retval = longhaul_setstate(policy, i); current_vid = vid; msleep(200); } @@ -672,10 +659,11 @@ static int longhaul_target(struct cpufreq_policy *policy, else i--; } - longhaul_setstate(policy, table_index); + retval = longhaul_setstate(policy, table_index); } + longhaul_index = table_index; - return 0; + return retval; } @@ -780,7 +768,7 @@ static int longhaul_setup_southbridge(void) return 0; } -static int __cpuinit longhaul_cpu_init(struct cpufreq_policy *policy) +static int longhaul_cpu_init(struct cpufreq_policy *policy) { struct cpuinfo_x86 *c = &cpu_data(0); char *cpuname = NULL; @@ -919,37 +907,17 @@ static int __cpuinit longhaul_cpu_init(struct cpufreq_policy *policy) longhaul_setup_voltagescaling(); policy->cpuinfo.transition_latency = 200000; /* nsec */ - policy->cur = calc_speed(longhaul_get_cpu_mult()); - - ret = cpufreq_frequency_table_cpuinfo(policy, longhaul_table); - if (ret) - return ret; - - cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu); - - return 0; -} -static int longhaul_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; + return cpufreq_table_validate_and_show(policy, longhaul_table); } -static struct freq_attr *longhaul_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver longhaul_driver = { - .verify = longhaul_verify, - .target = longhaul_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = longhaul_target, .get = longhaul_get, .init = longhaul_cpu_init, - .exit = longhaul_cpu_exit, .name = "longhaul", - .owner = THIS_MODULE, - .attr = longhaul_attr, + .attr = cpufreq_generic_attr, }; static const struct x86_cpu_id longhaul_id[] = { @@ -1003,7 +971,15 @@ static void __exit longhaul_exit(void) for (i = 0; i < numscales; i++) { if (mults[i] == maxmult) 
{ + struct cpufreq_freqs freqs; + + freqs.old = policy->cur; + freqs.new = longhaul_table[i].frequency; + freqs.flags = 0; + + cpufreq_freq_transition_begin(policy, &freqs); longhaul_setstate(policy, i); + cpufreq_freq_transition_end(policy, &freqs, 0); break; } } diff --git a/drivers/cpufreq/longhaul.h b/drivers/cpufreq/longhaul.h index e2dc436099d..1928b923a57 100644 --- a/drivers/cpufreq/longhaul.h +++ b/drivers/cpufreq/longhaul.h @@ -56,7 +56,7 @@ union msr_longhaul { /* * VIA C3 Samuel 1 & Samuel 2 (stepping 0) */ -static const int __cpuinitconst samuel1_mults[16] = { +static const int samuel1_mults[16] = { -1, /* 0000 -> RESERVED */ 30, /* 0001 -> 3.0x */ 40, /* 0010 -> 4.0x */ @@ -75,7 +75,7 @@ static const int __cpuinitconst samuel1_mults[16] = { -1, /* 1111 -> RESERVED */ }; -static const int __cpuinitconst samuel1_eblcr[16] = { +static const int samuel1_eblcr[16] = { 50, /* 0000 -> RESERVED */ 30, /* 0001 -> 3.0x */ 40, /* 0010 -> 4.0x */ @@ -97,7 +97,7 @@ static const int __cpuinitconst samuel1_eblcr[16] = { /* * VIA C3 Samuel2 Stepping 1->15 */ -static const int __cpuinitconst samuel2_eblcr[16] = { +static const int samuel2_eblcr[16] = { 50, /* 0000 -> 5.0x */ 30, /* 0001 -> 3.0x */ 40, /* 0010 -> 4.0x */ @@ -119,7 +119,7 @@ static const int __cpuinitconst samuel2_eblcr[16] = { /* * VIA C3 Ezra */ -static const int __cpuinitconst ezra_mults[16] = { +static const int ezra_mults[16] = { 100, /* 0000 -> 10.0x */ 30, /* 0001 -> 3.0x */ 40, /* 0010 -> 4.0x */ @@ -138,7 +138,7 @@ static const int __cpuinitconst ezra_mults[16] = { 120, /* 1111 -> 12.0x */ }; -static const int __cpuinitconst ezra_eblcr[16] = { +static const int ezra_eblcr[16] = { 50, /* 0000 -> 5.0x */ 30, /* 0001 -> 3.0x */ 40, /* 0010 -> 4.0x */ @@ -160,7 +160,7 @@ static const int __cpuinitconst ezra_eblcr[16] = { /* * VIA C3 (Ezra-T) [C5M]. 
*/ -static const int __cpuinitconst ezrat_mults[32] = { +static const int ezrat_mults[32] = { 100, /* 0000 -> 10.0x */ 30, /* 0001 -> 3.0x */ 40, /* 0010 -> 4.0x */ @@ -196,7 +196,7 @@ static const int __cpuinitconst ezrat_mults[32] = { -1, /* 1111 -> RESERVED (12.0x) */ }; -static const int __cpuinitconst ezrat_eblcr[32] = { +static const int ezrat_eblcr[32] = { 50, /* 0000 -> 5.0x */ 30, /* 0001 -> 3.0x */ 40, /* 0010 -> 4.0x */ @@ -235,7 +235,7 @@ static const int __cpuinitconst ezrat_eblcr[32] = { /* * VIA C3 Nehemiah */ -static const int __cpuinitconst nehemiah_mults[32] = { +static const int nehemiah_mults[32] = { 100, /* 0000 -> 10.0x */ -1, /* 0001 -> 16.0x */ 40, /* 0010 -> 4.0x */ @@ -270,7 +270,7 @@ static const int __cpuinitconst nehemiah_mults[32] = { -1, /* 1111 -> 12.0x */ }; -static const int __cpuinitconst nehemiah_eblcr[32] = { +static const int nehemiah_eblcr[32] = { 50, /* 0000 -> 5.0x */ 160, /* 0001 -> 16.0x */ 40, /* 0010 -> 4.0x */ @@ -315,7 +315,7 @@ struct mV_pos { unsigned short pos; }; -static const struct mV_pos __cpuinitconst vrm85_mV[32] = { +static const struct mV_pos vrm85_mV[32] = { {1250, 8}, {1200, 6}, {1150, 4}, {1100, 2}, {1050, 0}, {1800, 30}, {1750, 28}, {1700, 26}, {1650, 24}, {1600, 22}, {1550, 20}, {1500, 18}, @@ -326,14 +326,14 @@ static const struct mV_pos __cpuinitconst vrm85_mV[32] = { {1475, 17}, {1425, 15}, {1375, 13}, {1325, 11} }; -static const unsigned char __cpuinitconst mV_vrm85[32] = { +static const unsigned char mV_vrm85[32] = { 0x04, 0x14, 0x03, 0x13, 0x02, 0x12, 0x01, 0x11, 0x00, 0x10, 0x0f, 0x1f, 0x0e, 0x1e, 0x0d, 0x1d, 0x0c, 0x1c, 0x0b, 0x1b, 0x0a, 0x1a, 0x09, 0x19, 0x08, 0x18, 0x07, 0x17, 0x06, 0x16, 0x05, 0x15 }; -static const struct mV_pos __cpuinitconst mobilevrm_mV[32] = { +static const struct mV_pos mobilevrm_mV[32] = { {1750, 31}, {1700, 30}, {1650, 29}, {1600, 28}, {1550, 27}, {1500, 26}, {1450, 25}, {1400, 24}, {1350, 23}, {1300, 22}, {1250, 21}, {1200, 20}, @@ -344,7 +344,7 @@ static const struct mV_pos __cpuinitconst mobilevrm_mV[32] = { {675, 3}, {650, 2}, {625, 1}, {600, 0} }; -static const unsigned char __cpuinitconst mV_mobilevrm[32] = { +static const unsigned char mV_mobilevrm[32] = { 0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08, diff --git a/drivers/cpufreq/longrun.c b/drivers/cpufreq/longrun.c index 8bc9f5fbbae..074971b1263 100644 --- a/drivers/cpufreq/longrun.c +++ b/drivers/cpufreq/longrun.c @@ -33,7 +33,7 @@ static unsigned int longrun_low_freq, longrun_high_freq; * Reads the current LongRun policy by access to MSR_TMTA_LONGRUN_FLAGS * and MSR_TMTA_LONGRUN_CTRL */ -static void __cpuinit longrun_get_policy(struct cpufreq_policy *policy) +static void longrun_get_policy(struct cpufreq_policy *policy) { u32 msr_lo, msr_hi; @@ -129,9 +129,7 @@ static int longrun_verify_policy(struct cpufreq_policy *policy) return -EINVAL; policy->cpu = 0; - cpufreq_verify_within_limits(policy, - policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); + cpufreq_verify_within_cpu_limits(policy); if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) && (policy->policy != CPUFREQ_POLICY_PERFORMANCE)) @@ -163,7 +161,7 @@ static unsigned int longrun_get(unsigned int cpu) * TMTA rules: * performance_pctg = (target_freq - low_freq)/(high_freq - low_freq) */ -static int __cpuinit longrun_determine_freqs(unsigned int *low_freq, +static int longrun_determine_freqs(unsigned int *low_freq, unsigned int *high_freq) { u32 msr_lo, msr_hi; @@ -256,7 +254,7 
@@ static int __cpuinit longrun_determine_freqs(unsigned int *low_freq, } -static int __cpuinit longrun_cpu_init(struct cpufreq_policy *policy) +static int longrun_cpu_init(struct cpufreq_policy *policy) { int result = 0; @@ -286,7 +284,6 @@ static struct cpufreq_driver longrun_driver = { .get = longrun_get, .init = longrun_cpu_init, .name = "longrun", - .owner = THIS_MODULE, }; static const struct x86_cpu_id longrun_ids[] = { diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index d5391276894..d4add862194 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c @@ -24,8 +24,6 @@ static uint nowait; -static struct clk *cpuclk; - static void (*saved_cpu_wait) (void); static int loongson2_cpu_freq_notifier(struct notifier_block *nb, @@ -44,65 +42,34 @@ static int loongson2_cpu_freq_notifier(struct notifier_block *nb, return 0; } -static unsigned int loongson2_cpufreq_get(unsigned int cpu) -{ - return clk_get_rate(cpuclk); -} - /* * Here we notify other drivers of the proposed change and the final change. */ static int loongson2_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) + unsigned int index) { unsigned int cpu = policy->cpu; - unsigned int newstate = 0; cpumask_t cpus_allowed; - struct cpufreq_freqs freqs; unsigned int freq; cpus_allowed = current->cpus_allowed; set_cpus_allowed_ptr(current, cpumask_of(cpu)); - if (cpufreq_frequency_table_target - (policy, &loongson2_clockmod_table[0], target_freq, relation, - &newstate)) - return -EINVAL; - freq = ((cpu_clock_freq / 1000) * - loongson2_clockmod_table[newstate].index) / 8; - if (freq < policy->min || freq > policy->max) - return -EINVAL; - - pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000); - - freqs.old = loongson2_cpufreq_get(cpu); - freqs.new = freq; - freqs.flags = 0; - - if (freqs.new == freqs.old) - return 0; - - /* notifiers */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + loongson2_clockmod_table[index].driver_data) / 8; set_cpus_allowed_ptr(current, &cpus_allowed); /* setting the cpu frequency */ - clk_set_rate(cpuclk, freq); - - /* notifiers */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - pr_debug("cpufreq: set frequency %u kHz\n", freq); + clk_set_rate(policy->clk, freq * 1000); return 0; } static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) { + struct clk *cpuclk; int i; unsigned long rate; int ret; @@ -118,11 +85,6 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) clk_put(cpuclk); return -EINVAL; } - ret = clk_set_rate(cpuclk, rate); - if (ret) { - clk_put(cpuclk); - return ret; - } /* clock table init */ for (i = 2; @@ -130,41 +92,30 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) i++) loongson2_clockmod_table[i].frequency = (rate * i) / 8; - policy->cur = loongson2_cpufreq_get(policy->cpu); - - cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0], - policy->cpu); - - return cpufreq_frequency_table_cpuinfo(policy, - &loongson2_clockmod_table[0]); -} + ret = clk_set_rate(cpuclk, rate * 1000); + if (ret) { + clk_put(cpuclk); + return ret; + } -static int loongson2_cpufreq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, - &loongson2_clockmod_table[0]); + policy->clk = cpuclk; + return cpufreq_generic_init(policy, &loongson2_clockmod_table[0], 0); } static int loongson2_cpufreq_exit(struct cpufreq_policy *policy) { - clk_put(cpuclk); 
+ clk_put(policy->clk); return 0; } -static struct freq_attr *loongson2_table_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver loongson2_cpufreq_driver = { - .owner = THIS_MODULE, .name = "loongson2", .init = loongson2_cpufreq_cpu_init, - .verify = loongson2_cpufreq_verify, - .target = loongson2_cpufreq_target, - .get = loongson2_cpufreq_get, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = loongson2_cpufreq_target, + .get = cpufreq_generic_get, .exit = loongson2_cpufreq_exit, - .attr = loongson2_table_attr, + .attr = cpufreq_generic_attr, }; static struct platform_device_id platform_device_ids[] = { diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c index cdd62915efa..cc3408fc073 100644 --- a/drivers/cpufreq/maple-cpufreq.c +++ b/drivers/cpufreq/maple-cpufreq.c @@ -24,7 +24,7 @@ #include <linux/completion.h> #include <linux/mutex.h> #include <linux/time.h> -#include <linux/of.h> +#include <linux/of_device.h> #define DBG(fmt...) pr_debug(fmt) @@ -59,14 +59,9 @@ #define CPUFREQ_LOW 1 static struct cpufreq_frequency_table maple_cpu_freqs[] = { - {CPUFREQ_HIGH, 0}, - {CPUFREQ_LOW, 0}, - {0, CPUFREQ_TABLE_END}, -}; - -static struct freq_attr *maple_cpu_freqs_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, + {0, CPUFREQ_HIGH, 0}, + {0, CPUFREQ_LOW, 0}, + {0, 0, CPUFREQ_TABLE_END}, }; /* Power mode data is an array of the 32 bits PCR values to use for @@ -74,8 +69,6 @@ static struct freq_attr *maple_cpu_freqs_attr[] = { */ static int maple_pmode_cur; -static DEFINE_MUTEX(maple_switch_mutex); - static const u32 *maple_pmode_data; static int maple_pmode_max; @@ -135,37 +128,10 @@ static int maple_scom_query_freq(void) * Common interface to the cpufreq core */ -static int maple_cpufreq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, maple_cpu_freqs); -} - static int maple_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) + unsigned int index) { - unsigned int newstate = 0; - struct cpufreq_freqs freqs; - int rc; - - if (cpufreq_frequency_table_target(policy, maple_cpu_freqs, - target_freq, relation, &newstate)) - return -EINVAL; - - if (maple_pmode_cur == newstate) - return 0; - - mutex_lock(&maple_switch_mutex); - - freqs.old = maple_cpu_freqs[maple_pmode_cur].frequency; - freqs.new = maple_cpu_freqs[newstate].frequency; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - rc = maple_scom_switch_freq(newstate); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - mutex_unlock(&maple_switch_mutex); - - return rc; + return maple_scom_switch_freq(index); } static unsigned int maple_cpufreq_get_speed(unsigned int cpu) @@ -175,33 +141,21 @@ static unsigned int maple_cpufreq_get_speed(unsigned int cpu) static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy) { - policy->cpuinfo.transition_latency = 12000; - policy->cur = maple_cpu_freqs[maple_scom_query_freq()].frequency; - /* secondary CPUs are tied to the primary one by the - * cpufreq core if in the secondary policy we tell it that - * it actually must be one policy together with all others. 
*/ - cpumask_setall(policy->cpus); - cpufreq_frequency_table_get_attr(maple_cpu_freqs, policy->cpu); - - return cpufreq_frequency_table_cpuinfo(policy, - maple_cpu_freqs); + return cpufreq_generic_init(policy, maple_cpu_freqs, 12000); } - static struct cpufreq_driver maple_cpufreq_driver = { .name = "maple", - .owner = THIS_MODULE, .flags = CPUFREQ_CONST_LOOPS, .init = maple_cpufreq_cpu_init, - .verify = maple_cpufreq_verify, - .target = maple_cpufreq_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = maple_cpufreq_target, .get = maple_cpufreq_get_speed, - .attr = maple_cpu_freqs_attr, + .attr = cpufreq_generic_attr, }; static int __init maple_cpufreq_init(void) { - struct device_node *cpus; struct device_node *cpunode; unsigned int psize; unsigned long max_freq; @@ -217,24 +171,11 @@ static int __init maple_cpufreq_init(void) !of_machine_is_compatible("Momentum,Apache")) return 0; - cpus = of_find_node_by_path("/cpus"); - if (cpus == NULL) { - DBG("No /cpus node !\n"); - return -ENODEV; - } - /* Get first CPU node */ - for (cpunode = NULL; - (cpunode = of_get_next_child(cpus, cpunode)) != NULL;) { - const u32 *reg = of_get_property(cpunode, "reg", NULL); - if (reg == NULL || (*reg) != 0) - continue; - if (!strcmp(cpunode->type, "cpu")) - break; - } + cpunode = of_cpu_device_node_get(0); if (cpunode == NULL) { printk(KERN_ERR "cpufreq: Can't find any CPU 0 node\n"); - goto bail_cpus; + goto bail_noprops; } /* Check 970FX for now */ @@ -290,14 +231,11 @@ static int __init maple_cpufreq_init(void) rc = cpufreq_register_driver(&maple_cpufreq_driver); of_node_put(cpunode); - of_node_put(cpus); return rc; bail_noprops: of_node_put(cpunode); -bail_cpus: - of_node_put(cpus); return rc; } diff --git a/drivers/cpufreq/mperf.c b/drivers/cpufreq/mperf.c deleted file mode 100644 index 911e193018a..00000000000 --- a/drivers/cpufreq/mperf.c +++ /dev/null @@ -1,51 +0,0 @@ -#include <linux/kernel.h> -#include <linux/smp.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/cpufreq.h> -#include <linux/slab.h> - -#include "mperf.h" - -static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf); - -/* Called via smp_call_function_single(), on the target CPU */ -static void read_measured_perf_ctrs(void *_cur) -{ - struct aperfmperf *am = _cur; - - get_aperfmperf(am); -} - -/* - * Return the measured active (C0) frequency on this CPU since last call - * to this function. - * Input: cpu number - * Return: Average CPU frequency in terms of max frequency (zero on error) - * - * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance - * over a period of time, while CPU is in C0 state. - * IA32_MPERF counts at the rate of max advertised frequency - * IA32_APERF counts at the rate of actual CPU frequency - * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and - * no meaning should be associated with absolute values of these MSRs. 
- */ -unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy, - unsigned int cpu) -{ - struct aperfmperf perf; - unsigned long ratio; - unsigned int retval; - - if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1)) - return 0; - - ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf); - per_cpu(acfreq_old_perf, cpu) = perf; - - retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT; - - return retval; -} -EXPORT_SYMBOL_GPL(cpufreq_get_measured_perf); -MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/mperf.h b/drivers/cpufreq/mperf.h deleted file mode 100644 index 5dbf2950dc2..00000000000 --- a/drivers/cpufreq/mperf.h +++ /dev/null @@ -1,9 +0,0 @@ -/* - * (c) 2010 Advanced Micro Devices, Inc. - * Your use of this code is subject to the terms and conditions of the - * GNU general public license version 2. See "COPYING" or - * http://www.gnu.org/licenses/gpl.html - */ - -unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy, - unsigned int cpu); diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index 0279d18a57f..5f69c9aa703 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -22,7 +22,7 @@ #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> -#include <linux/opp.h> +#include <linux/pm_opp.h> #include <linux/cpu.h> #include <linux/module.h> #include <linux/platform_device.h> @@ -36,68 +36,21 @@ static struct cpufreq_frequency_table *freq_table; static atomic_t freq_table_users = ATOMIC_INIT(0); -static struct clk *mpu_clk; static struct device *mpu_dev; static struct regulator *mpu_reg; -static int omap_verify_speed(struct cpufreq_policy *policy) +static int omap_target(struct cpufreq_policy *policy, unsigned int index) { - if (!freq_table) - return -EINVAL; - return cpufreq_frequency_table_verify(policy, freq_table); -} - -static unsigned int omap_getspeed(unsigned int cpu) -{ - unsigned long rate; - - if (cpu >= NR_CPUS) - return 0; - - rate = clk_get_rate(mpu_clk) / 1000; - return rate; -} - -static int omap_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int i; - int r, ret = 0; - struct cpufreq_freqs freqs; - struct opp *opp; + int r, ret; + struct dev_pm_opp *opp; unsigned long freq, volt = 0, volt_old = 0, tol = 0; + unsigned int old_freq, new_freq; - if (!freq_table) { - dev_err(mpu_dev, "%s: cpu%d: no freq table!\n", __func__, - policy->cpu); - return -EINVAL; - } - - ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, - relation, &i); - if (ret) { - dev_dbg(mpu_dev, "%s: cpu%d: no freq match for %d(ret=%d)\n", - __func__, policy->cpu, target_freq, ret); - return ret; - } - freqs.new = freq_table[i].frequency; - if (!freqs.new) { - dev_err(mpu_dev, "%s: cpu%d: no match for freq %d\n", __func__, - policy->cpu, target_freq); - return -EINVAL; - } + old_freq = policy->cur; + new_freq = freq_table[index].frequency; - freqs.old = omap_getspeed(policy->cpu); - - if (freqs.old == freqs.new && policy->cur == freqs.new) - return ret; - - /* notifiers */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - - freq = freqs.new * 1000; - ret = clk_round_rate(mpu_clk, freq); + freq = new_freq * 1000; + ret = clk_round_rate(policy->clk, freq); if (IS_ERR_VALUE(ret)) { dev_warn(mpu_dev, "CPUfreq: Cannot find matching frequency for %lu\n", @@ -108,140 +61,102 @@ static int omap_target(struct cpufreq_policy *policy, if (mpu_reg) { rcu_read_lock(); - opp = 
opp_find_freq_ceil(mpu_dev, &freq); + opp = dev_pm_opp_find_freq_ceil(mpu_dev, &freq); if (IS_ERR(opp)) { rcu_read_unlock(); dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n", - __func__, freqs.new); + __func__, new_freq); return -EINVAL; } - volt = opp_get_voltage(opp); + volt = dev_pm_opp_get_voltage(opp); rcu_read_unlock(); tol = volt * OPP_TOLERANCE / 100; volt_old = regulator_get_voltage(mpu_reg); } dev_dbg(mpu_dev, "cpufreq-omap: %u MHz, %ld mV --> %u MHz, %ld mV\n", - freqs.old / 1000, volt_old ? volt_old / 1000 : -1, - freqs.new / 1000, volt ? volt / 1000 : -1); + old_freq / 1000, volt_old ? volt_old / 1000 : -1, + new_freq / 1000, volt ? volt / 1000 : -1); /* scaling up? scale voltage before frequency */ - if (mpu_reg && (freqs.new > freqs.old)) { + if (mpu_reg && (new_freq > old_freq)) { r = regulator_set_voltage(mpu_reg, volt - tol, volt + tol); if (r < 0) { dev_warn(mpu_dev, "%s: unable to scale voltage up.\n", __func__); - freqs.new = freqs.old; - goto done; + return r; } } - ret = clk_set_rate(mpu_clk, freqs.new * 1000); + ret = clk_set_rate(policy->clk, new_freq * 1000); /* scaling down? scale voltage after frequency */ - if (mpu_reg && (freqs.new < freqs.old)) { + if (mpu_reg && (new_freq < old_freq)) { r = regulator_set_voltage(mpu_reg, volt - tol, volt + tol); if (r < 0) { dev_warn(mpu_dev, "%s: unable to scale voltage down.\n", __func__); - ret = clk_set_rate(mpu_clk, freqs.old * 1000); - freqs.new = freqs.old; - goto done; + clk_set_rate(policy->clk, old_freq * 1000); + return r; } } - freqs.new = omap_getspeed(policy->cpu); - -done: - /* notifiers */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - return ret; } static inline void freq_table_free(void) { if (atomic_dec_and_test(&freq_table_users)) - opp_free_cpufreq_table(mpu_dev, &freq_table); + dev_pm_opp_free_cpufreq_table(mpu_dev, &freq_table); } -static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) +static int omap_cpu_init(struct cpufreq_policy *policy) { - int result = 0; - - mpu_clk = clk_get(NULL, "cpufreq_ck"); - if (IS_ERR(mpu_clk)) - return PTR_ERR(mpu_clk); + int result; - if (policy->cpu >= NR_CPUS) { - result = -EINVAL; - goto fail_ck; - } - - policy->cur = omap_getspeed(policy->cpu); - - if (!freq_table) - result = opp_init_cpufreq_table(mpu_dev, &freq_table); + policy->clk = clk_get(NULL, "cpufreq_ck"); + if (IS_ERR(policy->clk)) + return PTR_ERR(policy->clk); - if (result) { - dev_err(mpu_dev, "%s: cpu%d: failed creating freq table[%d]\n", + if (!freq_table) { + result = dev_pm_opp_init_cpufreq_table(mpu_dev, &freq_table); + if (result) { + dev_err(mpu_dev, + "%s: cpu%d: failed creating freq table[%d]\n", __func__, policy->cpu, result); - goto fail_ck; + goto fail; + } } atomic_inc_return(&freq_table_users); - result = cpufreq_frequency_table_cpuinfo(policy, freq_table); - if (result) - goto fail_table; - - cpufreq_frequency_table_get_attr(freq_table, policy->cpu); - - policy->cur = omap_getspeed(policy->cpu); - - /* - * On OMAP SMP configuartion, both processors share the voltage - * and clock. So both CPUs needs to be scaled together and hence - * needs software co-ordination. Use cpufreq affected_cpus - * interface to handle this scenario. Additional is_smp() check - * is to keep SMP_ON_UP build working. - */ - if (is_smp()) - cpumask_setall(policy->cpus); - /* FIXME: what's the actual transition time? 
*/ - policy->cpuinfo.transition_latency = 300 * 1000; - - return 0; + result = cpufreq_generic_init(policy, freq_table, 300 * 1000); + if (!result) + return 0; -fail_table: freq_table_free(); -fail_ck: - clk_put(mpu_clk); +fail: + clk_put(policy->clk); return result; } static int omap_cpu_exit(struct cpufreq_policy *policy) { freq_table_free(); - clk_put(mpu_clk); + clk_put(policy->clk); return 0; } -static struct freq_attr *omap_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver omap_driver = { - .flags = CPUFREQ_STICKY, - .verify = omap_verify_speed, - .target = omap_target, - .get = omap_getspeed, + .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = omap_target, + .get = cpufreq_generic_get, .init = omap_cpu_init, .exit = omap_cpu_exit, .name = "omap", - .attr = omap_cpufreq_attr, + .attr = cpufreq_generic_attr, }; static int omap_cpufreq_probe(struct platform_device *pdev) diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c index 421ef37d0bb..529cfd92158 100644 --- a/drivers/cpufreq/p4-clockmod.c +++ b/drivers/cpufreq/p4-clockmod.c @@ -92,60 +92,34 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate) static struct cpufreq_frequency_table p4clockmod_table[] = { - {DC_RESV, CPUFREQ_ENTRY_INVALID}, - {DC_DFLT, 0}, - {DC_25PT, 0}, - {DC_38PT, 0}, - {DC_50PT, 0}, - {DC_64PT, 0}, - {DC_75PT, 0}, - {DC_88PT, 0}, - {DC_DISABLE, 0}, - {DC_RESV, CPUFREQ_TABLE_END}, + {0, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {0, DC_DFLT, 0}, + {0, DC_25PT, 0}, + {0, DC_38PT, 0}, + {0, DC_50PT, 0}, + {0, DC_64PT, 0}, + {0, DC_75PT, 0}, + {0, DC_88PT, 0}, + {0, DC_DISABLE, 0}, + {0, DC_RESV, CPUFREQ_TABLE_END}, }; -static int cpufreq_p4_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) +static int cpufreq_p4_target(struct cpufreq_policy *policy, unsigned int index) { - unsigned int newstate = DC_RESV; - struct cpufreq_freqs freqs; int i; - if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0], - target_freq, relation, &newstate)) - return -EINVAL; - - freqs.old = cpufreq_p4_get(policy->cpu); - freqs.new = stock_freq * p4clockmod_table[newstate].index / 8; - - if (freqs.new == freqs.old) - return 0; - - /* notifiers */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - /* run on each logical CPU, * see section 13.15.3 of IA32 Intel Architecture Software * Developer's Manual, Volume 3 */ for_each_cpu(i, policy->cpus) - cpufreq_p4_setdc(i, p4clockmod_table[newstate].index); - - /* notifiers */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + cpufreq_p4_setdc(i, p4clockmod_table[index].driver_data); return 0; } -static int cpufreq_p4_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, &p4clockmod_table[0]); -} - - static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) { if (c->x86 == 0x06) { @@ -230,25 +204,17 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) else p4clockmod_table[i].frequency = (stock_freq * i)/8; } - cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu); /* cpuinfo and default policy values */ /* the transition latency is set to be 1 higher than the maximum * transition latency of the ondemand governor */ policy->cpuinfo.transition_latency = 10000001; - policy->cur = stock_freq; - return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]); + return 
cpufreq_table_validate_and_show(policy, &p4clockmod_table[0]); } -static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; -} - static unsigned int cpufreq_p4_get(unsigned int cpu) { u32 l, h; @@ -267,20 +233,13 @@ static unsigned int cpufreq_p4_get(unsigned int cpu) return stock_freq; } -static struct freq_attr *p4clockmod_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver p4clockmod_driver = { - .verify = cpufreq_p4_verify, - .target = cpufreq_p4_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = cpufreq_p4_target, .init = cpufreq_p4_cpu_init, - .exit = cpufreq_p4_cpu_exit, .get = cpufreq_p4_get, .name = "p4-clockmod", - .owner = THIS_MODULE, - .attr = p4clockmod_attr, + .attr = cpufreq_generic_attr, }; static const struct x86_cpu_id cpufreq_p4_id[] = { diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c new file mode 100644 index 00000000000..35dd4d7ffee --- /dev/null +++ b/drivers/cpufreq/pasemi-cpufreq.c @@ -0,0 +1,291 @@ +/* + * Copyright (C) 2007 PA Semi, Inc + * + * Authors: Egor Martovetsky <egor@pasemi.com> + * Olof Johansson <olof@lixom.net> + * + * Maintained by: Olof Johansson <olof@lixom.net> + * + * Based on arch/powerpc/platforms/cell/cbe_cpufreq.c: + * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#include <linux/cpufreq.h> +#include <linux/timer.h> +#include <linux/module.h> +#include <linux/of_address.h> + +#include <asm/hw_irq.h> +#include <asm/io.h> +#include <asm/prom.h> +#include <asm/time.h> +#include <asm/smp.h> + +#define SDCASR_REG 0x0100 +#define SDCASR_REG_STRIDE 0x1000 +#define SDCPWR_CFGA0_REG 0x0100 +#define SDCPWR_PWST0_REG 0x0000 +#define SDCPWR_GIZTIME_REG 0x0440 + +/* SDCPWR_GIZTIME_REG fields */ +#define SDCPWR_GIZTIME_GR 0x80000000 +#define SDCPWR_GIZTIME_LONGLOCK 0x000000ff + +/* Offset of ASR registers from SDC base */ +#define SDCASR_OFFSET 0x120000 + +static void __iomem *sdcpwr_mapbase; +static void __iomem *sdcasr_mapbase; + +/* Current astate, is used when waking up from power savings on + * one core, in case the other core has switched states during + * the idle time. 
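+ *
+ * check_astate()/restore_astate() below expose this value; restore_astate()
+ * is non-static, presumably so the platform's wakeup path can call it to
+ * re-apply the cached astate on the core that just woke up.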
+ */ +static int current_astate; + +/* We support 5(A0-A4) power states excluding turbo(A5-A6) modes */ +static struct cpufreq_frequency_table pas_freqs[] = { + {0, 0, 0}, + {0, 1, 0}, + {0, 2, 0}, + {0, 3, 0}, + {0, 4, 0}, + {0, 0, CPUFREQ_TABLE_END}, +}; + +/* + * hardware specific functions + */ + +static int get_astate_freq(int astate) +{ + u32 ret; + ret = in_le32(sdcpwr_mapbase + SDCPWR_CFGA0_REG + (astate * 0x10)); + + return ret & 0x3f; +} + +static int get_cur_astate(int cpu) +{ + u32 ret; + + ret = in_le32(sdcpwr_mapbase + SDCPWR_PWST0_REG); + ret = (ret >> (cpu * 4)) & 0x7; + + return ret; +} + +static int get_gizmo_latency(void) +{ + u32 giztime, ret; + + giztime = in_le32(sdcpwr_mapbase + SDCPWR_GIZTIME_REG); + + /* just provide the upper bound */ + if (giztime & SDCPWR_GIZTIME_GR) + ret = (giztime & SDCPWR_GIZTIME_LONGLOCK) * 128000; + else + ret = (giztime & SDCPWR_GIZTIME_LONGLOCK) * 1000; + + return ret; +} + +static void set_astate(int cpu, unsigned int astate) +{ + unsigned long flags; + + /* Return if called before init has run */ + if (unlikely(!sdcasr_mapbase)) + return; + + local_irq_save(flags); + + out_le32(sdcasr_mapbase + SDCASR_REG + SDCASR_REG_STRIDE*cpu, astate); + + local_irq_restore(flags); +} + +int check_astate(void) +{ + return get_cur_astate(hard_smp_processor_id()); +} + +void restore_astate(int cpu) +{ + set_astate(cpu, current_astate); +} + +/* + * cpufreq functions + */ + +static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + struct cpufreq_frequency_table *pos; + const u32 *max_freqp; + u32 max_freq; + int cur_astate; + struct resource res; + struct device_node *cpu, *dn; + int err = -ENODEV; + + cpu = of_get_cpu_node(policy->cpu, NULL); + + if (!cpu) + goto out; + + dn = of_find_compatible_node(NULL, NULL, "1682m-sdc"); + if (!dn) + dn = of_find_compatible_node(NULL, NULL, + "pasemi,pwrficient-sdc"); + if (!dn) + goto out; + err = of_address_to_resource(dn, 0, &res); + of_node_put(dn); + if (err) + goto out; + sdcasr_mapbase = ioremap(res.start + SDCASR_OFFSET, 0x2000); + if (!sdcasr_mapbase) { + err = -EINVAL; + goto out; + } + + dn = of_find_compatible_node(NULL, NULL, "1682m-gizmo"); + if (!dn) + dn = of_find_compatible_node(NULL, NULL, + "pasemi,pwrficient-gizmo"); + if (!dn) { + err = -ENODEV; + goto out_unmap_sdcasr; + } + err = of_address_to_resource(dn, 0, &res); + of_node_put(dn); + if (err) + goto out_unmap_sdcasr; + sdcpwr_mapbase = ioremap(res.start, 0x1000); + if (!sdcpwr_mapbase) { + err = -EINVAL; + goto out_unmap_sdcasr; + } + + pr_debug("init cpufreq on CPU %d\n", policy->cpu); + + max_freqp = of_get_property(cpu, "clock-frequency", NULL); + if (!max_freqp) { + err = -EINVAL; + goto out_unmap_sdcpwr; + } + + /* we need the freq in kHz */ + max_freq = *max_freqp / 1000; + + pr_debug("max clock-frequency is at %u kHz\n", max_freq); + pr_debug("initializing frequency table\n"); + + /* initialize frequency table */ + cpufreq_for_each_entry(pos, pas_freqs) { + pos->frequency = get_astate_freq(pos->driver_data) * 100000; + pr_debug("%d: %d\n", (int)(pos - pas_freqs), pos->frequency); + } + + cur_astate = get_cur_astate(policy->cpu); + pr_debug("current astate is at %d\n",cur_astate); + + policy->cur = pas_freqs[cur_astate].frequency; + ppc_proc_freq = policy->cur * 1000ul; + + return cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency()); + +out_unmap_sdcpwr: + iounmap(sdcpwr_mapbase); + +out_unmap_sdcasr: + iounmap(sdcasr_mapbase); +out: + return err; +} + +static int pas_cpufreq_cpu_exit(struct cpufreq_policy 
*policy)
+{
+	/*
+	 * We don't support CPU hotplug. Don't unmap after the system
+	 * has already made it to a running state.
+	 */
+	if (system_state != SYSTEM_BOOTING)
+		return 0;
+
+	if (sdcasr_mapbase)
+		iounmap(sdcasr_mapbase);
+	if (sdcpwr_mapbase)
+		iounmap(sdcpwr_mapbase);
+
+	return 0;
+}
+
+static int pas_cpufreq_target(struct cpufreq_policy *policy,
+			      unsigned int pas_astate_new)
+{
+	int i;
+
+	pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n",
+		 policy->cpu,
+		 pas_freqs[pas_astate_new].frequency,
+		 pas_freqs[pas_astate_new].driver_data);
+
+	current_astate = pas_astate_new;
+
+	for_each_online_cpu(i)
+		set_astate(i, pas_astate_new);
+
+	ppc_proc_freq = pas_freqs[pas_astate_new].frequency * 1000ul;
+	return 0;
+}
+
+static struct cpufreq_driver pas_cpufreq_driver = {
+	.name		= "pas-cpufreq",
+	.flags		= CPUFREQ_CONST_LOOPS,
+	.init		= pas_cpufreq_cpu_init,
+	.exit		= pas_cpufreq_cpu_exit,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= pas_cpufreq_target,
+	.attr		= cpufreq_generic_attr,
+};
+
+/*
+ * module init and destroy
+ */
+
+static int __init pas_cpufreq_init(void)
+{
+	if (!of_machine_is_compatible("PA6T-1682M") &&
+	    !of_machine_is_compatible("pasemi,pwrficient"))
+		return -ENODEV;
+
+	return cpufreq_register_driver(&pas_cpufreq_driver);
+}
+
+static void __exit pas_cpufreq_exit(void)
+{
+	cpufreq_unregister_driver(&pas_cpufreq_driver);
+}
+
+module_init(pas_cpufreq_init);
+module_exit(pas_cpufreq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>, Olof Johansson <olof@lixom.net>");
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 0de00081a81..728a2d87949 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -111,8 +111,7 @@ static struct pcc_cpu __percpu *pcc_cpu_info;
 
 static int pcc_cpufreq_verify(struct cpufreq_policy *policy)
 {
-	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-				     policy->cpuinfo.max_freq);
+	cpufreq_verify_within_cpu_limits(policy);
 	return 0;
 }
 
@@ -214,8 +213,9 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
 		cpu, target_freq,
 		(pcch_virt_addr + pcc_cpu_data->input_offset));
 
+	freqs.old = policy->cur;
 	freqs.new = target_freq;
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+	cpufreq_freq_transition_begin(policy, &freqs);
 
 	input_buffer = 0x1 | (((target_freq * 100)
 			       / (ioread32(&pcch_hdr->nominal) * 1000)) << 8);
@@ -229,23 +229,20 @@
 	memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ);
 
 	status = ioread16(&pcch_hdr->status);
+	iowrite16(0, &pcch_hdr->status);
+
+	cpufreq_freq_transition_end(policy, &freqs, status != CMD_COMPLETE);
+	spin_unlock(&pcc_lock);
+
 	if (status != CMD_COMPLETE) {
 		pr_debug("target: FAILED for cpu %d, with status: 0x%x\n",
 			cpu, status);
-		goto cmd_incomplete;
+		return -EINVAL;
 	}
-	iowrite16(0, &pcch_hdr->status);
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 	pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu);
-	spin_unlock(&pcc_lock);
 
 	return 0;
-
-cmd_incomplete:
-	iowrite16(0, &pcch_hdr->status);
-	spin_unlock(&pcc_lock);
-	return -EINVAL;
 }
 
 static int pcc_get_offset(int cpu)
@@ -394,15 +391,14 @@ static int __init pcc_cpufreq_probe(void)
 	struct pcc_memory_resource *mem_resource;
 	struct pcc_register_resource *reg_resource;
 	union acpi_object *out_obj, *member;
-	acpi_handle handle, osc_handle, pcch_handle;
+	acpi_handle handle, osc_handle;
 	int ret = 0;
 
 	status = acpi_get_handle(NULL, "\_SB", &handle);
 	if (ACPI_FAILURE(status))
 		return -ENODEV;
 
-	status = acpi_get_handle(handle, "PCCH", &pcch_handle);
-	if (ACPI_FAILURE(status))
+	if (!acpi_has_method(handle, "PCCH"))
 		return -ENODEV;
 
 	status = acpi_get_handle(handle, "_OSC", &osc_handle);
@@ -558,13 +554,6 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		ioread32(&pcch_hdr->nominal) * 1000;
 	policy->min = policy->cpuinfo.min_freq =
 		ioread32(&pcch_hdr->minimum_frequency) * 1000;
-	policy->cur = pcc_get_freq(cpu);
-
-	if (!policy->cur) {
-		pr_debug("init: Unable to get current CPU frequency\n");
-		result = -EINVAL;
-		goto out;
-	}
 
 	pr_debug("init: policy->max is %d, policy->min is %d\n",
 		policy->max, policy->min);
@@ -585,7 +574,6 @@ static struct cpufreq_driver pcc_cpufreq_driver = {
 	.init = pcc_cpufreq_cpu_init,
 	.exit = pcc_cpufreq_cpu_exit,
 	.name = "pcc-cpufreq",
-	.owner = THIS_MODULE,
 };
 
 static int __init pcc_cpufreq_init(void)
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
new file mode 100644
index 00000000000..7615180d7ee
--- /dev/null
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -0,0 +1,686 @@
+/*
+ * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ * Copyright (C) 2004 John Steele Scott <toojays@toojays.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * TODO: Need a big cleanup here. Basically, we need to have different
+ * cpufreq_driver structures for the different types of HW instead of the
+ * current mess. We also need to better deal with the detection of the
+ * type of machine.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/adb.h>
+#include <linux/pmu.h>
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/hardirq.h>
+#include <linux/of_device.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/irq.h>
+#include <asm/pmac_feature.h>
+#include <asm/mmu_context.h>
+#include <asm/sections.h>
+#include <asm/cputable.h>
+#include <asm/time.h>
+#include <asm/mpic.h>
+#include <asm/keylargo.h>
+#include <asm/switch_to.h>
+
+/* WARNING !!! This will cause calibrate_delay() to be called,
+ * but this is an __init function ! So you MUST go edit
+ * init/main.c to make it non-init before enabling DEBUG_FREQ
+ */
+#undef DEBUG_FREQ
+
+extern void low_choose_7447a_dfs(int dfs);
+extern void low_choose_750fx_pll(int pll);
+extern void low_sleep_handler(void);
+
+/*
+ * Currently, PowerMac cpufreq supports only high & low frequencies
+ * that are set by the firmware
+ */
+static unsigned int low_freq;
+static unsigned int hi_freq;
+static unsigned int cur_freq;
+static unsigned int sleep_freq;
+static unsigned long transition_latency;
+
+/*
+ * Different models use different mechanisms to switch the frequency
+ */
+static int (*set_speed_proc)(int low_speed);
+static unsigned int (*get_speed_proc)(void);
+
+/*
+ * Some definitions used by the various speedprocs
+ */
+static u32 voltage_gpio;
+static u32 frequency_gpio;
+static u32 slew_done_gpio;
+static int no_schedule;
+static int has_cpu_l2lve;
+static int is_pmu_based;
+
+/* There are only two frequency states for each processor. Values
+ * are in kHz for the time being.
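+ * (Both entries below start with .frequency == 0; pmac_cpufreq_setup()
+ * fills them in from low_freq/hi_freq once the machine type is probed.)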
+ */ +#define CPUFREQ_HIGH 0 +#define CPUFREQ_LOW 1 + +static struct cpufreq_frequency_table pmac_cpu_freqs[] = { + {0, CPUFREQ_HIGH, 0}, + {0, CPUFREQ_LOW, 0}, + {0, 0, CPUFREQ_TABLE_END}, +}; + +static inline void local_delay(unsigned long ms) +{ + if (no_schedule) + mdelay(ms); + else + msleep(ms); +} + +#ifdef DEBUG_FREQ +static inline void debug_calc_bogomips(void) +{ + /* This will cause a recalc of bogomips and display the + * result. We backup/restore the value to avoid affecting the + * core cpufreq framework's own calculation. + */ + unsigned long save_lpj = loops_per_jiffy; + calibrate_delay(); + loops_per_jiffy = save_lpj; +} +#endif /* DEBUG_FREQ */ + +/* Switch CPU speed under 750FX CPU control + */ +static int cpu_750fx_cpu_speed(int low_speed) +{ + u32 hid2; + + if (low_speed == 0) { + /* ramping up, set voltage first */ + pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05); + /* Make sure we sleep for at least 1ms */ + local_delay(10); + + /* tweak L2 for high voltage */ + if (has_cpu_l2lve) { + hid2 = mfspr(SPRN_HID2); + hid2 &= ~0x2000; + mtspr(SPRN_HID2, hid2); + } + } +#ifdef CONFIG_6xx + low_choose_750fx_pll(low_speed); +#endif + if (low_speed == 1) { + /* tweak L2 for low voltage */ + if (has_cpu_l2lve) { + hid2 = mfspr(SPRN_HID2); + hid2 |= 0x2000; + mtspr(SPRN_HID2, hid2); + } + + /* ramping down, set voltage last */ + pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04); + local_delay(10); + } + + return 0; +} + +static unsigned int cpu_750fx_get_cpu_speed(void) +{ + if (mfspr(SPRN_HID1) & HID1_PS) + return low_freq; + else + return hi_freq; +} + +/* Switch CPU speed using DFS */ +static int dfs_set_cpu_speed(int low_speed) +{ + if (low_speed == 0) { + /* ramping up, set voltage first */ + pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05); + /* Make sure we sleep for at least 1ms */ + local_delay(1); + } + + /* set frequency */ +#ifdef CONFIG_6xx + low_choose_7447a_dfs(low_speed); +#endif + udelay(100); + + if (low_speed == 1) { + /* ramping down, set voltage last */ + pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04); + local_delay(1); + } + + return 0; +} + +static unsigned int dfs_get_cpu_speed(void) +{ + if (mfspr(SPRN_HID1) & HID1_DFS) + return low_freq; + else + return hi_freq; +} + + +/* Switch CPU speed using slewing GPIOs + */ +static int gpios_set_cpu_speed(int low_speed) +{ + int gpio, timeout = 0; + + /* If ramping up, set voltage first */ + if (low_speed == 0) { + pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05); + /* Delay is way too big but it's ok, we schedule */ + local_delay(10); + } + + /* Set frequency */ + gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0); + if (low_speed == ((gpio & 0x01) == 0)) + goto skip; + + pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, frequency_gpio, + low_speed ? 
0x04 : 0x05);
+	udelay(200);
+	do {
+		if (++timeout > 100)
+			break;
+		local_delay(1);
+		gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, slew_done_gpio, 0);
+	} while((gpio & 0x02) == 0);
+ skip:
+	/* If ramping down, set voltage last */
+	if (low_speed == 1) {
+		pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
+		/* Delay is way too big but it's ok, we schedule */
+		local_delay(10);
+	}
+
+#ifdef DEBUG_FREQ
+	debug_calc_bogomips();
+#endif
+
+	return 0;
+}
+
+/* Switch CPU speed under PMU control
+ */
+static int pmu_set_cpu_speed(int low_speed)
+{
+	struct adb_request req;
+	unsigned long save_l2cr;
+	unsigned long save_l3cr;
+	unsigned int pic_prio;
+	unsigned long flags;
+
+	preempt_disable();
+
+#ifdef DEBUG_FREQ
+	printk(KERN_DEBUG "HID1, before: %x\n", mfspr(SPRN_HID1));
+#endif
+	pmu_suspend();
+
+	/* Disable all interrupt sources on openpic */
+	pic_prio = mpic_cpu_get_priority();
+	mpic_cpu_set_priority(0xf);
+
+	/* Make sure the decrementer won't interrupt us */
+	asm volatile("mtdec %0" : : "r" (0x7fffffff));
+	/* Make sure any pending DEC interrupt occurring while we did
+	 * the above didn't re-enable the DEC */
+	mb();
+	asm volatile("mtdec %0" : : "r" (0x7fffffff));
+
+	/* We can now disable MSR_EE */
+	local_irq_save(flags);
+
+	/* Give up the FPU & vector unit */
+	enable_kernel_fp();
+
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		enable_kernel_altivec();
+#endif /* CONFIG_ALTIVEC */
+
+	/* Save & disable L2 and L3 caches */
+	save_l3cr = _get_L3CR();	/* (returns -1 if not available) */
+	save_l2cr = _get_L2CR();	/* (returns -1 if not available) */
+
+	/* Send the new speed command. My assumption is that this command
+	 * will cause PLL_CFG[0..3] to be changed next time CPU goes to sleep
+	 */
+	pmu_request(&req, NULL, 6, PMU_CPU_SPEED, 'W', 'O', 'O', 'F', low_speed);
+	while (!req.complete)
+		pmu_poll();
+
+	/* Prepare the northbridge for the speed transition */
+	pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 1, 1);
+
+	/* Call low level code to backup CPU state and recover from
+	 * hardware reset
+	 */
+	low_sleep_handler();
+
+	/* Restore the northbridge */
+	pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 1, 0);
+
+	/* Restore L2 cache */
+	if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0)
+		_set_L2CR(save_l2cr);
+	/* Restore L3 cache */
+	if (save_l3cr != 0xffffffff && (save_l3cr & L3CR_L3E) != 0)
+		_set_L3CR(save_l3cr);
+
+	/* Restore userland MMU context */
+	switch_mmu_context(NULL, current->active_mm);
+
+#ifdef DEBUG_FREQ
+	printk(KERN_DEBUG "HID1, after: %x\n", mfspr(SPRN_HID1));
+#endif
+
+	/* Restore low level PMU operations */
+	pmu_unlock();
+
+	/*
+	 * Restore decrementer; we'll take a decrementer interrupt
+	 * as soon as interrupts are re-enabled and the generic
+	 * clockevents code will reprogram it with the right value.
+	 */
+	set_dec(1);
+
+	/* Restore interrupts */
+	mpic_cpu_set_priority(pic_prio);
+
+	/* Let interrupts flow again ...
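+	 * (undoing, in reverse order, the masking that was set up
+	 * before the speed switch above)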
*/ + local_irq_restore(flags); + +#ifdef DEBUG_FREQ + debug_calc_bogomips(); +#endif + + pmu_resume(); + + preempt_enable(); + + return 0; +} + +static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode) +{ + unsigned long l3cr; + static unsigned long prev_l3cr; + + if (speed_mode == CPUFREQ_LOW && + cpu_has_feature(CPU_FTR_L3CR)) { + l3cr = _get_L3CR(); + if (l3cr & L3CR_L3E) { + prev_l3cr = l3cr; + _set_L3CR(0); + } + } + set_speed_proc(speed_mode == CPUFREQ_LOW); + if (speed_mode == CPUFREQ_HIGH && + cpu_has_feature(CPU_FTR_L3CR)) { + l3cr = _get_L3CR(); + if ((prev_l3cr & L3CR_L3E) && l3cr != prev_l3cr) + _set_L3CR(prev_l3cr); + } + cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq; + + return 0; +} + +static unsigned int pmac_cpufreq_get_speed(unsigned int cpu) +{ + return cur_freq; +} + +static int pmac_cpufreq_target( struct cpufreq_policy *policy, + unsigned int index) +{ + int rc; + + rc = do_set_cpu_speed(policy, index); + + ppc_proc_freq = cur_freq * 1000ul; + return rc; +} + +static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + return cpufreq_generic_init(policy, pmac_cpu_freqs, transition_latency); +} + +static u32 read_gpio(struct device_node *np) +{ + const u32 *reg = of_get_property(np, "reg", NULL); + u32 offset; + + if (reg == NULL) + return 0; + /* That works for all keylargos but shall be fixed properly + * some day... The problem is that it seems we can't rely + * on the "reg" property of the GPIO nodes, they are either + * relative to the base of KeyLargo or to the base of the + * GPIO space, and the device-tree doesn't help. + */ + offset = *reg; + if (offset < KEYLARGO_GPIO_LEVELS0) + offset += KEYLARGO_GPIO_LEVELS0; + return offset; +} + +static int pmac_cpufreq_suspend(struct cpufreq_policy *policy) +{ + /* Ok, this could be made a bit smarter, but let's be robust for now. We + * always force a speed change to high speed before sleep, to make sure + * we have appropriate voltage and/or bus speed for the wakeup process, + * and to make sure our loops_per_jiffies are "good enough", that is will + * not cause too short delays if we sleep in low speed and wake in high + * speed.. + */ + no_schedule = 1; + sleep_freq = cur_freq; + if (cur_freq == low_freq && !is_pmu_based) + do_set_cpu_speed(policy, CPUFREQ_HIGH); + return 0; +} + +static int pmac_cpufreq_resume(struct cpufreq_policy *policy) +{ + /* If we resume, first check if we have a get() function */ + if (get_speed_proc) + cur_freq = get_speed_proc(); + else + cur_freq = 0; + + /* We don't, hrm... we don't really know our speed here, best + * is that we force a switch to whatever it was, which is + * probably high speed due to our suspend() routine + */ + do_set_cpu_speed(policy, sleep_freq == low_freq ? 
+		CPUFREQ_LOW : CPUFREQ_HIGH);
+
+	ppc_proc_freq = cur_freq * 1000ul;
+
+	no_schedule = 0;
+	return 0;
+}
+
+static struct cpufreq_driver pmac_cpufreq_driver = {
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= pmac_cpufreq_target,
+	.get		= pmac_cpufreq_get_speed,
+	.init		= pmac_cpufreq_cpu_init,
+	.suspend	= pmac_cpufreq_suspend,
+	.resume		= pmac_cpufreq_resume,
+	.flags		= CPUFREQ_PM_NO_WARN,
+	.attr		= cpufreq_generic_attr,
+	.name		= "powermac",
+};
+
+
+static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
+{
+	struct device_node *volt_gpio_np = of_find_node_by_name(NULL,
+								"voltage-gpio");
+	struct device_node *freq_gpio_np = of_find_node_by_name(NULL,
+								"frequency-gpio");
+	struct device_node *slew_done_gpio_np = of_find_node_by_name(NULL,
+								     "slewing-done");
+	const u32 *value;
+
+	/*
+	 * Check to see if it's GPIO driven or PMU only
+	 *
+	 * The way we extract the GPIO address is slightly hackish, but it
+	 * works well enough for now. We need to abstract the whole GPIO
+	 * stuff sooner or later anyway
+	 */
+
+	if (volt_gpio_np)
+		voltage_gpio = read_gpio(volt_gpio_np);
+	if (freq_gpio_np)
+		frequency_gpio = read_gpio(freq_gpio_np);
+	if (slew_done_gpio_np)
+		slew_done_gpio = read_gpio(slew_done_gpio_np);
+
+	/* If we use the frequency GPIOs, calculate the min/max speeds based
+	 * on the bus frequencies
+	 */
+	if (frequency_gpio && slew_done_gpio) {
+		int lenp, rc;
+		const u32 *freqs, *ratio;
+
+		freqs = of_get_property(cpunode, "bus-frequencies", &lenp);
+		lenp /= sizeof(u32);
+		if (freqs == NULL || lenp != 2) {
+			printk(KERN_ERR "cpufreq: bus-frequencies incorrect or missing\n");
+			return 1;
+		}
+		ratio = of_get_property(cpunode, "processor-to-bus-ratio*2",
+					NULL);
+		if (ratio == NULL) {
+			printk(KERN_ERR "cpufreq: processor-to-bus-ratio*2 missing\n");
+			return 1;
+		}
+
+		/* Get the min/max bus frequencies */
+		low_freq = min(freqs[0], freqs[1]);
+		hi_freq = max(freqs[0], freqs[1]);
+
+		/* Grrrr.. It _seems_ that the device-tree is lying on the low bus
+		 * frequency, it claims it to be around 84MHz on some models while
+		 * it appears to be approx. 101MHz on all. Let's hack around here...
+		 * fortunately, we don't need to be too precise
+		 */
+		if (low_freq < 98000000)
+			low_freq = 101000000;
+
+		/* Convert those to CPU core clocks */
+		low_freq = (low_freq * (*ratio)) / 2000;
+		hi_freq = (hi_freq * (*ratio)) / 2000;
+
+		/* Now that we have the frequencies, we read the GPIO to see what
+		 * our current speed is
+		 */
+		rc = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0);
+		cur_freq = (rc & 0x01) ?
hi_freq : low_freq; + + set_speed_proc = gpios_set_cpu_speed; + return 1; + } + + /* If we use the PMU, look for the min & max frequencies in the + * device-tree + */ + value = of_get_property(cpunode, "min-clock-frequency", NULL); + if (!value) + return 1; + low_freq = (*value) / 1000; + /* The PowerBook G4 12" (PowerBook6,1) has an error in the device-tree + * here */ + if (low_freq < 100000) + low_freq *= 10; + + value = of_get_property(cpunode, "max-clock-frequency", NULL); + if (!value) + return 1; + hi_freq = (*value) / 1000; + set_speed_proc = pmu_set_cpu_speed; + is_pmu_based = 1; + + return 0; +} + +static int pmac_cpufreq_init_7447A(struct device_node *cpunode) +{ + struct device_node *volt_gpio_np; + + if (of_get_property(cpunode, "dynamic-power-step", NULL) == NULL) + return 1; + + volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select"); + if (volt_gpio_np) + voltage_gpio = read_gpio(volt_gpio_np); + if (!voltage_gpio){ + printk(KERN_ERR "cpufreq: missing cpu-vcore-select gpio\n"); + return 1; + } + + /* OF only reports the high frequency */ + hi_freq = cur_freq; + low_freq = cur_freq/2; + + /* Read actual frequency from CPU */ + cur_freq = dfs_get_cpu_speed(); + set_speed_proc = dfs_set_cpu_speed; + get_speed_proc = dfs_get_cpu_speed; + + return 0; +} + +static int pmac_cpufreq_init_750FX(struct device_node *cpunode) +{ + struct device_node *volt_gpio_np; + u32 pvr; + const u32 *value; + + if (of_get_property(cpunode, "dynamic-power-step", NULL) == NULL) + return 1; + + hi_freq = cur_freq; + value = of_get_property(cpunode, "reduced-clock-frequency", NULL); + if (!value) + return 1; + low_freq = (*value) / 1000; + + volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select"); + if (volt_gpio_np) + voltage_gpio = read_gpio(volt_gpio_np); + + pvr = mfspr(SPRN_PVR); + has_cpu_l2lve = !((pvr & 0xf00) == 0x100); + + set_speed_proc = cpu_750fx_cpu_speed; + get_speed_proc = cpu_750fx_get_cpu_speed; + cur_freq = cpu_750fx_get_cpu_speed(); + + return 0; +} + +/* Currently, we support the following machines: + * + * - Titanium PowerBook 1Ghz (PMU based, 667Mhz & 1Ghz) + * - Titanium PowerBook 800 (PMU based, 667Mhz & 800Mhz) + * - Titanium PowerBook 400 (PMU based, 300Mhz & 400Mhz) + * - Titanium PowerBook 500 (PMU based, 300Mhz & 500Mhz) + * - iBook2 500/600 (PMU based, 400Mhz & 500/600Mhz) + * - iBook2 700 (CPU based, 400Mhz & 700Mhz, support low voltage) + * - Recent MacRISC3 laptops + * - All new machines with 7447A CPUs + */ +static int __init pmac_cpufreq_setup(void) +{ + struct device_node *cpunode; + const u32 *value; + + if (strstr(cmd_line, "nocpufreq")) + return 0; + + /* Get first CPU node */ + cpunode = of_cpu_device_node_get(0); + if (!cpunode) + goto out; + + /* Get current cpu clock freq */ + value = of_get_property(cpunode, "clock-frequency", NULL); + if (!value) + goto out; + cur_freq = (*value) / 1000; + transition_latency = CPUFREQ_ETERNAL; + + /* Check for 7447A based MacRISC3 */ + if (of_machine_is_compatible("MacRISC3") && + of_get_property(cpunode, "dynamic-power-step", NULL) && + PVR_VER(mfspr(SPRN_PVR)) == 0x8003) { + pmac_cpufreq_init_7447A(cpunode); + transition_latency = 8000000; + /* Check for other MacRISC3 machines */ + } else if (of_machine_is_compatible("PowerBook3,4") || + of_machine_is_compatible("PowerBook3,5") || + of_machine_is_compatible("MacRISC3")) { + pmac_cpufreq_init_MacRISC3(cpunode); + /* Else check for iBook2 500/600 */ + } else if (of_machine_is_compatible("PowerBook4,1")) { + hi_freq = cur_freq; + low_freq = 400000; + 
set_speed_proc = pmu_set_cpu_speed; + is_pmu_based = 1; + } + /* Else check for TiPb 550 */ + else if (of_machine_is_compatible("PowerBook3,3") && cur_freq == 550000) { + hi_freq = cur_freq; + low_freq = 500000; + set_speed_proc = pmu_set_cpu_speed; + is_pmu_based = 1; + } + /* Else check for TiPb 400 & 500 */ + else if (of_machine_is_compatible("PowerBook3,2")) { + /* We only know about the 400 MHz and the 500Mhz model + * they both have 300 MHz as low frequency + */ + if (cur_freq < 350000 || cur_freq > 550000) + goto out; + hi_freq = cur_freq; + low_freq = 300000; + set_speed_proc = pmu_set_cpu_speed; + is_pmu_based = 1; + } + /* Else check for 750FX */ + else if (PVR_VER(mfspr(SPRN_PVR)) == 0x7000) + pmac_cpufreq_init_750FX(cpunode); +out: + of_node_put(cpunode); + if (set_speed_proc == NULL) + return -ENODEV; + + pmac_cpu_freqs[CPUFREQ_LOW].frequency = low_freq; + pmac_cpu_freqs[CPUFREQ_HIGH].frequency = hi_freq; + ppc_proc_freq = cur_freq * 1000ul; + + printk(KERN_INFO "Registering PowerMac CPU frequency driver\n"); + printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n", + low_freq/1000, hi_freq/1000, cur_freq/1000); + + return cpufreq_register_driver(&pmac_cpufreq_driver); +} + +module_init(pmac_cpufreq_setup); + diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c new file mode 100644 index 00000000000..8bc422977b5 --- /dev/null +++ b/drivers/cpufreq/pmac64-cpufreq.c @@ -0,0 +1,677 @@ +/* + * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> + * and Markus Demleitner <msdemlei@cl.uni-heidelberg.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This driver adds basic cpufreq support for SMU & 970FX based G5 Macs, + * that is iMac G5 and latest single CPU desktop. + */ + +#undef DEBUG + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/cpufreq.h> +#include <linux/init.h> +#include <linux/completion.h> +#include <linux/mutex.h> +#include <linux/of_device.h> +#include <asm/prom.h> +#include <asm/machdep.h> +#include <asm/irq.h> +#include <asm/sections.h> +#include <asm/cputable.h> +#include <asm/time.h> +#include <asm/smu.h> +#include <asm/pmac_pfunc.h> + +#define DBG(fmt...) 
pr_debug(fmt) + +/* see 970FX user manual */ + +#define SCOM_PCR 0x0aa001 /* PCR scom addr */ + +#define PCR_HILO_SELECT 0x80000000U /* 1 = PCR, 0 = PCRH */ +#define PCR_SPEED_FULL 0x00000000U /* 1:1 speed value */ +#define PCR_SPEED_HALF 0x00020000U /* 1:2 speed value */ +#define PCR_SPEED_QUARTER 0x00040000U /* 1:4 speed value */ +#define PCR_SPEED_MASK 0x000e0000U /* speed mask */ +#define PCR_SPEED_SHIFT 17 +#define PCR_FREQ_REQ_VALID 0x00010000U /* freq request valid */ +#define PCR_VOLT_REQ_VALID 0x00008000U /* volt request valid */ +#define PCR_TARGET_TIME_MASK 0x00006000U /* target time */ +#define PCR_STATLAT_MASK 0x00001f00U /* STATLAT value */ +#define PCR_SNOOPLAT_MASK 0x000000f0U /* SNOOPLAT value */ +#define PCR_SNOOPACC_MASK 0x0000000fU /* SNOOPACC value */ + +#define SCOM_PSR 0x408001 /* PSR scom addr */ +/* warning: PSR is a 64 bits register */ +#define PSR_CMD_RECEIVED 0x2000000000000000U /* command received */ +#define PSR_CMD_COMPLETED 0x1000000000000000U /* command completed */ +#define PSR_CUR_SPEED_MASK 0x0300000000000000U /* current speed */ +#define PSR_CUR_SPEED_SHIFT (56) + +/* + * The G5 only supports two frequencies (Quarter speed is not supported) + */ +#define CPUFREQ_HIGH 0 +#define CPUFREQ_LOW 1 + +static struct cpufreq_frequency_table g5_cpu_freqs[] = { + {0, CPUFREQ_HIGH, 0}, + {0, CPUFREQ_LOW, 0}, + {0, 0, CPUFREQ_TABLE_END}, +}; + +/* Power mode data is an array of the 32 bits PCR values to use for + * the various frequencies, retrieved from the device-tree + */ +static int g5_pmode_cur; + +static void (*g5_switch_volt)(int speed_mode); +static int (*g5_switch_freq)(int speed_mode); +static int (*g5_query_freq)(void); + +static unsigned long transition_latency; + +#ifdef CONFIG_PMAC_SMU + +static const u32 *g5_pmode_data; +static int g5_pmode_max; + +static struct smu_sdbp_fvt *g5_fvt_table; /* table of op. points */ +static int g5_fvt_count; /* number of op. points */ +static int g5_fvt_cur; /* current op. point */ + +/* + * SMU based voltage switching for Neo2 platforms + */ + +static void g5_smu_switch_volt(int speed_mode) +{ + struct smu_simple_cmd cmd; + + DECLARE_COMPLETION_ONSTACK(comp); + smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 8, smu_done_complete, + &comp, 'V', 'S', 'L', 'E', 'W', + 0xff, g5_fvt_cur+1, speed_mode); + wait_for_completion(&comp); +} + +/* + * Platform function based voltage/vdnap switching for Neo2 + */ + +static struct pmf_function *pfunc_set_vdnap0; +static struct pmf_function *pfunc_vdnap0_complete; + +static void g5_vdnap_switch_volt(int speed_mode) +{ + struct pmf_args args; + u32 slew, done = 0; + unsigned long timeout; + + slew = (speed_mode == CPUFREQ_LOW) ? 
1 : 0; + args.count = 1; + args.u[0].p = &slew; + + pmf_call_one(pfunc_set_vdnap0, &args); + + /* It's an irq GPIO so we should be able to just block here, + * I'll do that later after I've properly tested the IRQ code for + * platform functions + */ + timeout = jiffies + HZ/10; + while(!time_after(jiffies, timeout)) { + args.count = 1; + args.u[0].p = &done; + pmf_call_one(pfunc_vdnap0_complete, &args); + if (done) + break; + usleep_range(1000, 1000); + } + if (done == 0) + printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n"); +} + + +/* + * SCOM based frequency switching for 970FX rev3 + */ +static int g5_scom_switch_freq(int speed_mode) +{ + unsigned long flags; + int to; + + /* If frequency is going up, first ramp up the voltage */ + if (speed_mode < g5_pmode_cur) + g5_switch_volt(speed_mode); + + local_irq_save(flags); + + /* Clear PCR high */ + scom970_write(SCOM_PCR, 0); + /* Clear PCR low */ + scom970_write(SCOM_PCR, PCR_HILO_SELECT | 0); + /* Set PCR low */ + scom970_write(SCOM_PCR, PCR_HILO_SELECT | + g5_pmode_data[speed_mode]); + + /* Wait for completion */ + for (to = 0; to < 10; to++) { + unsigned long psr = scom970_read(SCOM_PSR); + + if ((psr & PSR_CMD_RECEIVED) == 0 && + (((psr >> PSR_CUR_SPEED_SHIFT) ^ + (g5_pmode_data[speed_mode] >> PCR_SPEED_SHIFT)) & 0x3) + == 0) + break; + if (psr & PSR_CMD_COMPLETED) + break; + udelay(100); + } + + local_irq_restore(flags); + + /* If frequency is going down, last ramp the voltage */ + if (speed_mode > g5_pmode_cur) + g5_switch_volt(speed_mode); + + g5_pmode_cur = speed_mode; + ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul; + + return 0; +} + +static int g5_scom_query_freq(void) +{ + unsigned long psr = scom970_read(SCOM_PSR); + int i; + + for (i = 0; i <= g5_pmode_max; i++) + if ((((psr >> PSR_CUR_SPEED_SHIFT) ^ + (g5_pmode_data[i] >> PCR_SPEED_SHIFT)) & 0x3) == 0) + break; + return i; +} + +/* + * Fake voltage switching for platforms with missing support + */ + +static void g5_dummy_switch_volt(int speed_mode) +{ +} + +#endif /* CONFIG_PMAC_SMU */ + +/* + * Platform function based voltage switching for PowerMac7,2 & 7,3 + */ + +static struct pmf_function *pfunc_cpu0_volt_high; +static struct pmf_function *pfunc_cpu0_volt_low; +static struct pmf_function *pfunc_cpu1_volt_high; +static struct pmf_function *pfunc_cpu1_volt_low; + +static void g5_pfunc_switch_volt(int speed_mode) +{ + if (speed_mode == CPUFREQ_HIGH) { + if (pfunc_cpu0_volt_high) + pmf_call_one(pfunc_cpu0_volt_high, NULL); + if (pfunc_cpu1_volt_high) + pmf_call_one(pfunc_cpu1_volt_high, NULL); + } else { + if (pfunc_cpu0_volt_low) + pmf_call_one(pfunc_cpu0_volt_low, NULL); + if (pfunc_cpu1_volt_low) + pmf_call_one(pfunc_cpu1_volt_low, NULL); + } + usleep_range(10000, 10000); /* should be faster , to fix */ +} + +/* + * Platform function based frequency switching for PowerMac7,2 & 7,3 + */ + +static struct pmf_function *pfunc_cpu_setfreq_high; +static struct pmf_function *pfunc_cpu_setfreq_low; +static struct pmf_function *pfunc_cpu_getfreq; +static struct pmf_function *pfunc_slewing_done; + +static int g5_pfunc_switch_freq(int speed_mode) +{ + struct pmf_args args; + u32 done = 0; + unsigned long timeout; + int rc; + + DBG("g5_pfunc_switch_freq(%d)\n", speed_mode); + + /* If frequency is going up, first ramp up the voltage */ + if (speed_mode < g5_pmode_cur) + g5_switch_volt(speed_mode); + + /* Do it */ + if (speed_mode == CPUFREQ_HIGH) + rc = pmf_call_one(pfunc_cpu_setfreq_high, NULL); + else + rc = pmf_call_one(pfunc_cpu_setfreq_low, NULL); + + if 
(rc) + printk(KERN_WARNING "cpufreq: pfunc switch error %d\n", rc); + + /* It's an irq GPIO so we should be able to just block here, + * I'll do that later after I've properly tested the IRQ code for + * platform functions + */ + timeout = jiffies + HZ/10; + while(!time_after(jiffies, timeout)) { + args.count = 1; + args.u[0].p = &done; + pmf_call_one(pfunc_slewing_done, &args); + if (done) + break; + usleep_range(500, 500); + } + if (done == 0) + printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n"); + + /* If frequency is going down, last ramp the voltage */ + if (speed_mode > g5_pmode_cur) + g5_switch_volt(speed_mode); + + g5_pmode_cur = speed_mode; + ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul; + + return 0; +} + +static int g5_pfunc_query_freq(void) +{ + struct pmf_args args; + u32 val = 0; + + args.count = 1; + args.u[0].p = &val; + pmf_call_one(pfunc_cpu_getfreq, &args); + return val ? CPUFREQ_HIGH : CPUFREQ_LOW; +} + + +/* + * Common interface to the cpufreq core + */ + +static int g5_cpufreq_target(struct cpufreq_policy *policy, unsigned int index) +{ + return g5_switch_freq(index); +} + +static unsigned int g5_cpufreq_get_speed(unsigned int cpu) +{ + return g5_cpu_freqs[g5_pmode_cur].frequency; +} + +static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + return cpufreq_generic_init(policy, g5_cpu_freqs, transition_latency); +} + +static struct cpufreq_driver g5_cpufreq_driver = { + .name = "powermac", + .flags = CPUFREQ_CONST_LOOPS, + .init = g5_cpufreq_cpu_init, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = g5_cpufreq_target, + .get = g5_cpufreq_get_speed, + .attr = cpufreq_generic_attr, +}; + + +#ifdef CONFIG_PMAC_SMU + +static int __init g5_neo2_cpufreq_init(struct device_node *cpunode) +{ + unsigned int psize, ssize; + unsigned long max_freq; + char *freq_method, *volt_method; + const u32 *valp; + u32 pvr_hi; + int use_volts_vdnap = 0; + int use_volts_smu = 0; + int rc = -ENODEV; + + /* Check supported platforms */ + if (of_machine_is_compatible("PowerMac8,1") || + of_machine_is_compatible("PowerMac8,2") || + of_machine_is_compatible("PowerMac9,1") || + of_machine_is_compatible("PowerMac12,1")) + use_volts_smu = 1; + else if (of_machine_is_compatible("PowerMac11,2")) + use_volts_vdnap = 1; + else + return -ENODEV; + + /* Check 970FX for now */ + valp = of_get_property(cpunode, "cpu-version", NULL); + if (!valp) { + DBG("No cpu-version property !\n"); + goto bail_noprops; + } + pvr_hi = (*valp) >> 16; + if (pvr_hi != 0x3c && pvr_hi != 0x44) { + printk(KERN_ERR "cpufreq: Unsupported CPU version\n"); + goto bail_noprops; + } + + /* Look for the powertune data in the device-tree */ + g5_pmode_data = of_get_property(cpunode, "power-mode-data",&psize); + if (!g5_pmode_data) { + DBG("No power-mode-data !\n"); + goto bail_noprops; + } + g5_pmode_max = psize / sizeof(u32) - 1; + + if (use_volts_smu) { + const struct smu_sdbp_header *shdr; + + /* Look for the FVT table */ + shdr = smu_get_sdb_partition(SMU_SDB_FVT_ID, NULL); + if (!shdr) + goto bail_noprops; + g5_fvt_table = (struct smu_sdbp_fvt *)&shdr[1]; + ssize = (shdr->len * sizeof(u32)) - sizeof(*shdr); + g5_fvt_count = ssize / sizeof(*g5_fvt_table); + g5_fvt_cur = 0; + + /* Sanity checking */ + if (g5_fvt_count < 1 || g5_pmode_max < 1) + goto bail_noprops; + + g5_switch_volt = g5_smu_switch_volt; + volt_method = "SMU"; + } else if (use_volts_vdnap) { + struct device_node *root; + + root = of_find_node_by_path("/"); + if (root == NULL) { + printk(KERN_ERR "cpufreq: Can't 
find root of " + "device tree\n"); + goto bail_noprops; + } + pfunc_set_vdnap0 = pmf_find_function(root, "set-vdnap0"); + pfunc_vdnap0_complete = + pmf_find_function(root, "slewing-done"); + if (pfunc_set_vdnap0 == NULL || + pfunc_vdnap0_complete == NULL) { + printk(KERN_ERR "cpufreq: Can't find required " + "platform function\n"); + goto bail_noprops; + } + + g5_switch_volt = g5_vdnap_switch_volt; + volt_method = "GPIO"; + } else { + g5_switch_volt = g5_dummy_switch_volt; + volt_method = "none"; + } + + /* + * From what I see, clock-frequency is always the maximal frequency. + * The current driver can not slew sysclk yet, so we really only deal + * with powertune steps for now. We also only implement full freq and + * half freq in this version. So far, I haven't yet seen a machine + * supporting anything else. + */ + valp = of_get_property(cpunode, "clock-frequency", NULL); + if (!valp) + return -ENODEV; + max_freq = (*valp)/1000; + g5_cpu_freqs[0].frequency = max_freq; + g5_cpu_freqs[1].frequency = max_freq/2; + + /* Set callbacks */ + transition_latency = 12000; + g5_switch_freq = g5_scom_switch_freq; + g5_query_freq = g5_scom_query_freq; + freq_method = "SCOM"; + + /* Force apply current frequency to make sure everything is in + * sync (voltage is right for example). Firmware may leave us with + * a strange setting ... + */ + g5_switch_volt(CPUFREQ_HIGH); + msleep(10); + g5_pmode_cur = -1; + g5_switch_freq(g5_query_freq()); + + printk(KERN_INFO "Registering G5 CPU frequency driver\n"); + printk(KERN_INFO "Frequency method: %s, Voltage method: %s\n", + freq_method, volt_method); + printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n", + g5_cpu_freqs[1].frequency/1000, + g5_cpu_freqs[0].frequency/1000, + g5_cpu_freqs[g5_pmode_cur].frequency/1000); + + rc = cpufreq_register_driver(&g5_cpufreq_driver); + + /* We keep the CPU node on hold... hopefully, Apple G5 don't have + * hotplug CPU with a dynamic device-tree ... 
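+	 * (which is why the matching of_node_put() below happens only on
+	 * the bail_noprops error path, not after successful registration)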
+ */ + return rc; + + bail_noprops: + of_node_put(cpunode); + + return rc; +} + +#endif /* CONFIG_PMAC_SMU */ + + +static int __init g5_pm72_cpufreq_init(struct device_node *cpunode) +{ + struct device_node *cpuid = NULL, *hwclock = NULL; + const u8 *eeprom = NULL; + const u32 *valp; + u64 max_freq, min_freq, ih, il; + int has_volt = 1, rc = 0; + + DBG("cpufreq: Initializing for PowerMac7,2, PowerMac7,3 and" + " RackMac3,1...\n"); + + /* Lookup the cpuid eeprom node */ + cpuid = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/cpuid@a0"); + if (cpuid != NULL) + eeprom = of_get_property(cpuid, "cpuid", NULL); + if (eeprom == NULL) { + printk(KERN_ERR "cpufreq: Can't find cpuid EEPROM !\n"); + rc = -ENODEV; + goto bail; + } + + /* Lookup the i2c hwclock */ + for (hwclock = NULL; + (hwclock = of_find_node_by_name(hwclock, "i2c-hwclock")) != NULL;){ + const char *loc = of_get_property(hwclock, + "hwctrl-location", NULL); + if (loc == NULL) + continue; + if (strcmp(loc, "CPU CLOCK")) + continue; + if (!of_get_property(hwclock, "platform-get-frequency", NULL)) + continue; + break; + } + if (hwclock == NULL) { + printk(KERN_ERR "cpufreq: Can't find i2c clock chip !\n"); + rc = -ENODEV; + goto bail; + } + + DBG("cpufreq: i2c clock chip found: %s\n", hwclock->full_name); + + /* Now get all the platform functions */ + pfunc_cpu_getfreq = + pmf_find_function(hwclock, "get-frequency"); + pfunc_cpu_setfreq_high = + pmf_find_function(hwclock, "set-frequency-high"); + pfunc_cpu_setfreq_low = + pmf_find_function(hwclock, "set-frequency-low"); + pfunc_slewing_done = + pmf_find_function(hwclock, "slewing-done"); + pfunc_cpu0_volt_high = + pmf_find_function(hwclock, "set-voltage-high-0"); + pfunc_cpu0_volt_low = + pmf_find_function(hwclock, "set-voltage-low-0"); + pfunc_cpu1_volt_high = + pmf_find_function(hwclock, "set-voltage-high-1"); + pfunc_cpu1_volt_low = + pmf_find_function(hwclock, "set-voltage-low-1"); + + /* Check we have minimum requirements */ + if (pfunc_cpu_getfreq == NULL || pfunc_cpu_setfreq_high == NULL || + pfunc_cpu_setfreq_low == NULL || pfunc_slewing_done == NULL) { + printk(KERN_ERR "cpufreq: Can't find platform functions !\n"); + rc = -ENODEV; + goto bail; + } + + /* Check that we have complete sets */ + if (pfunc_cpu0_volt_high == NULL || pfunc_cpu0_volt_low == NULL) { + pmf_put_function(pfunc_cpu0_volt_high); + pmf_put_function(pfunc_cpu0_volt_low); + pfunc_cpu0_volt_high = pfunc_cpu0_volt_low = NULL; + has_volt = 0; + } + if (!has_volt || + pfunc_cpu1_volt_high == NULL || pfunc_cpu1_volt_low == NULL) { + pmf_put_function(pfunc_cpu1_volt_high); + pmf_put_function(pfunc_cpu1_volt_low); + pfunc_cpu1_volt_high = pfunc_cpu1_volt_low = NULL; + } + + /* Note: The device tree also contains a "platform-set-values" + * function for which I haven't quite figured out the usage. It + * might have to be called on init and/or wakeup, I'm not too sure + * but things seem to work fine without it so far ... + */ + + /* Get max frequency from device-tree */ + valp = of_get_property(cpunode, "clock-frequency", NULL); + if (!valp) { + printk(KERN_ERR "cpufreq: Can't find CPU frequency !\n"); + rc = -ENODEV; + goto bail; + } + + max_freq = (*valp)/1000; + + /* Now calculate reduced frequency by using the cpuid input freq + * ratio. 
This requires 64 bits math unless we are willing to lose + * some precision + */ + ih = *((u32 *)(eeprom + 0x10)); + il = *((u32 *)(eeprom + 0x20)); + + /* Check for machines with no useful settings */ + if (il == ih) { + printk(KERN_WARNING "cpufreq: No low frequency mode available" + " on this model !\n"); + rc = -ENODEV; + goto bail; + } + + min_freq = 0; + if (ih != 0 && il != 0) + min_freq = (max_freq * il) / ih; + + /* Sanity check */ + if (min_freq >= max_freq || min_freq < 1000) { + printk(KERN_ERR "cpufreq: Can't calculate low frequency !\n"); + rc = -ENXIO; + goto bail; + } + g5_cpu_freqs[0].frequency = max_freq; + g5_cpu_freqs[1].frequency = min_freq; + + /* Based on a measurement on Xserve G5, rounded up. */ + transition_latency = 10 * NSEC_PER_MSEC; + + /* Set callbacks */ + g5_switch_volt = g5_pfunc_switch_volt; + g5_switch_freq = g5_pfunc_switch_freq; + g5_query_freq = g5_pfunc_query_freq; + + /* Force apply current frequency to make sure everything is in + * sync (voltage is right for example). Firmware may leave us with + * a strange setting ... + */ + g5_switch_volt(CPUFREQ_HIGH); + msleep(10); + g5_pmode_cur = -1; + g5_switch_freq(g5_query_freq()); + + printk(KERN_INFO "Registering G5 CPU frequency driver\n"); + printk(KERN_INFO "Frequency method: i2c/pfunc, " + "Voltage method: %s\n", has_volt ? "i2c/pfunc" : "none"); + printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n", + g5_cpu_freqs[1].frequency/1000, + g5_cpu_freqs[0].frequency/1000, + g5_cpu_freqs[g5_pmode_cur].frequency/1000); + + rc = cpufreq_register_driver(&g5_cpufreq_driver); + bail: + if (rc != 0) { + pmf_put_function(pfunc_cpu_getfreq); + pmf_put_function(pfunc_cpu_setfreq_high); + pmf_put_function(pfunc_cpu_setfreq_low); + pmf_put_function(pfunc_slewing_done); + pmf_put_function(pfunc_cpu0_volt_high); + pmf_put_function(pfunc_cpu0_volt_low); + pmf_put_function(pfunc_cpu1_volt_high); + pmf_put_function(pfunc_cpu1_volt_low); + } + of_node_put(hwclock); + of_node_put(cpuid); + of_node_put(cpunode); + + return rc; +} + +static int __init g5_cpufreq_init(void) +{ + struct device_node *cpunode; + int rc = 0; + + /* Get first CPU node */ + cpunode = of_cpu_device_node_get(0); + if (cpunode == NULL) { + pr_err("cpufreq: Can't find any CPU node\n"); + return -ENODEV; + } + + if (of_machine_is_compatible("PowerMac7,2") || + of_machine_is_compatible("PowerMac7,3") || + of_machine_is_compatible("RackMac3,1")) + rc = g5_pm72_cpufreq_init(cpunode); +#ifdef CONFIG_PMAC_SMU + else + rc = g5_neo2_cpufreq_init(cpunode); +#endif /* CONFIG_PMAC_SMU */ + + return rc; +} + +module_init(g5_cpufreq_init); + + +MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c index ea0222a45b7..c8012bc8691 100644 --- a/drivers/cpufreq/powernow-k6.c +++ b/drivers/cpufreq/powernow-k6.c @@ -26,165 +26,214 @@ static unsigned int busfreq; /* FSB, in 10 kHz */ static unsigned int max_multiplier; +static unsigned int param_busfreq = 0; +static unsigned int param_max_multiplier = 0; + +module_param_named(max_multiplier, param_max_multiplier, uint, S_IRUGO); +MODULE_PARM_DESC(max_multiplier, "Maximum multiplier (allowed values: 20 30 35 40 45 50 55 60)"); + +module_param_named(bus_frequency, param_busfreq, uint, S_IRUGO); +MODULE_PARM_DESC(bus_frequency, "Bus frequency in kHz"); /* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */ static struct cpufreq_frequency_table clock_ratio[] = { - {45, /* 000 -> 4.5x */ 0}, - {50, /* 001 -> 5.0x */ 0}, - {40, /* 010 -> 4.0x */ 0}, - {55, /* 011 -> 
5.5x */ 0}, - {20, /* 100 -> 2.0x */ 0}, - {30, /* 101 -> 3.0x */ 0}, - {60, /* 110 -> 6.0x */ 0}, - {35, /* 111 -> 3.5x */ 0}, - {0, CPUFREQ_TABLE_END} + {0, 60, /* 110 -> 6.0x */ 0}, + {0, 55, /* 011 -> 5.5x */ 0}, + {0, 50, /* 001 -> 5.0x */ 0}, + {0, 45, /* 000 -> 4.5x */ 0}, + {0, 40, /* 010 -> 4.0x */ 0}, + {0, 35, /* 111 -> 3.5x */ 0}, + {0, 30, /* 101 -> 3.0x */ 0}, + {0, 20, /* 100 -> 2.0x */ 0}, + {0, 0, CPUFREQ_TABLE_END} +}; + +static const u8 index_to_register[8] = { 6, 3, 1, 0, 2, 7, 5, 4 }; +static const u8 register_to_index[8] = { 3, 2, 4, 1, 7, 6, 0, 5 }; + +static const struct { + unsigned freq; + unsigned mult; +} usual_frequency_table[] = { + { 400000, 40 }, // 100 * 4 + { 450000, 45 }, // 100 * 4.5 + { 475000, 50 }, // 95 * 5 + { 500000, 50 }, // 100 * 5 + { 506250, 45 }, // 112.5 * 4.5 + { 533500, 55 }, // 97 * 5.5 + { 550000, 55 }, // 100 * 5.5 + { 562500, 50 }, // 112.5 * 5 + { 570000, 60 }, // 95 * 6 + { 600000, 60 }, // 100 * 6 + { 618750, 55 }, // 112.5 * 5.5 + { 660000, 55 }, // 120 * 5.5 + { 675000, 60 }, // 112.5 * 6 + { 720000, 60 }, // 120 * 6 }; +#define FREQ_RANGE 3000 /** * powernow_k6_get_cpu_multiplier - returns the current FSB multiplier * - * Returns the current setting of the frequency multiplier. Core clock + * Returns the current setting of the frequency multiplier. Core clock * speed is frequency of the Front-Side Bus multiplied with this value. */ static int powernow_k6_get_cpu_multiplier(void) { - u64 invalue = 0; + unsigned long invalue = 0; u32 msrval; + local_irq_disable(); + msrval = POWERNOW_IOPORT + 0x1; wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ invalue = inl(POWERNOW_IOPORT + 0x8); msrval = POWERNOW_IOPORT + 0x0; wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ - return clock_ratio[(invalue >> 5)&7].index; -} + local_irq_enable(); + return clock_ratio[register_to_index[(invalue >> 5)&7]].driver_data; +} -/** - * powernow_k6_set_state - set the PowerNow! multiplier - * @best_i: clock_ratio[best_i] is the target multiplier - * - * Tries to change the PowerNow! multiplier - */ -static void powernow_k6_set_state(struct cpufreq_policy *policy, - unsigned int best_i) +static void powernow_k6_set_cpu_multiplier(unsigned int best_i) { - unsigned long outvalue = 0, invalue = 0; + unsigned long outvalue, invalue; unsigned long msrval; - struct cpufreq_freqs freqs; - - if (clock_ratio[best_i].index > max_multiplier) { - printk(KERN_ERR PFX "invalid target frequency\n"); - return; - } - - freqs.old = busfreq * powernow_k6_get_cpu_multiplier(); - freqs.new = busfreq * clock_ratio[best_i].index; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + unsigned long cr0; /* we now need to transform best_i to the BVC format, see AMD#23446 */ - outvalue = (1<<12) | (1<<10) | (1<<9) | (best_i<<5); + /* + * The processor doesn't respond to inquiry cycles while changing the + * frequency, so we must disable cache. 
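+	 * (hence the sequence below: interrupts off, CR0.CD set and the
+	 * caches flushed with wbinvd() before touching the PowerNow port,
+	 * then CR0 restored and interrupts re-enabled)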
+ */ + local_irq_disable(); + cr0 = read_cr0(); + write_cr0(cr0 | X86_CR0_CD); + wbinvd(); + + outvalue = (1<<12) | (1<<10) | (1<<9) | (index_to_register[best_i]<<5); msrval = POWERNOW_IOPORT + 0x1; wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ invalue = inl(POWERNOW_IOPORT + 0x8); - invalue = invalue & 0xf; + invalue = invalue & 0x1f; outvalue = outvalue | invalue; - outl(outvalue , (POWERNOW_IOPORT + 0x8)); + outl(outvalue, (POWERNOW_IOPORT + 0x8)); msrval = POWERNOW_IOPORT + 0x0; wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - return; -} - - -/** - * powernow_k6_verify - verifies a new CPUfreq policy - * @policy: new policy - * - * Policy must be within lowest and highest possible CPU Frequency, - * and at least one possible state must be within min and max. - */ -static int powernow_k6_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, &clock_ratio[0]); + write_cr0(cr0); + local_irq_enable(); } - /** - * powernow_k6_setpolicy - sets a new CPUFreq policy - * @policy: new policy - * @target_freq: the target frequency - * @relation: how that frequency relates to achieved frequency - * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) + * powernow_k6_target - set the PowerNow! multiplier + * @best_i: clock_ratio[best_i] is the target multiplier * - * sets a new CPUFreq policy + * Tries to change the PowerNow! multiplier */ static int powernow_k6_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) + unsigned int best_i) { - unsigned int newstate = 0; - if (cpufreq_frequency_table_target(policy, &clock_ratio[0], - target_freq, relation, &newstate)) + if (clock_ratio[best_i].driver_data > max_multiplier) { + printk(KERN_ERR PFX "invalid target frequency\n"); return -EINVAL; + } - powernow_k6_set_state(policy, newstate); + powernow_k6_set_cpu_multiplier(best_i); return 0; } - static int powernow_k6_cpu_init(struct cpufreq_policy *policy) { + struct cpufreq_frequency_table *pos; unsigned int i, f; - int result; + unsigned khz; if (policy->cpu != 0) return -ENODEV; - /* get frequencies */ - max_multiplier = powernow_k6_get_cpu_multiplier(); - busfreq = cpu_khz / max_multiplier; + max_multiplier = 0; + khz = cpu_khz; + for (i = 0; i < ARRAY_SIZE(usual_frequency_table); i++) { + if (khz >= usual_frequency_table[i].freq - FREQ_RANGE && + khz <= usual_frequency_table[i].freq + FREQ_RANGE) { + khz = usual_frequency_table[i].freq; + max_multiplier = usual_frequency_table[i].mult; + break; + } + } + if (param_max_multiplier) { + cpufreq_for_each_entry(pos, clock_ratio) + if (pos->driver_data == param_max_multiplier) { + max_multiplier = param_max_multiplier; + goto have_max_multiplier; + } + printk(KERN_ERR "powernow-k6: invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n"); + return -EINVAL; + } + + if (!max_multiplier) { + printk(KERN_WARNING "powernow-k6: unknown frequency %u, cannot determine current multiplier\n", khz); + printk(KERN_WARNING "powernow-k6: use module parameters max_multiplier and bus_frequency\n"); + return -EOPNOTSUPP; + } + +have_max_multiplier: + param_max_multiplier = max_multiplier; + + if (param_busfreq) { + if (param_busfreq >= 50000 && param_busfreq <= 150000) { + busfreq = param_busfreq / 10; + goto have_busfreq; + } + printk(KERN_ERR "powernow-k6: invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n"); + return -EINVAL; + } + + busfreq = khz / max_multiplier; 
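+	/* fall through to the common exit with the auto-detected value */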
+have_busfreq: + param_busfreq = busfreq * 10; /* table init */ - for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) { - f = clock_ratio[i].index; + cpufreq_for_each_entry(pos, clock_ratio) { + f = pos->driver_data; if (f > max_multiplier) - clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID; + pos->frequency = CPUFREQ_ENTRY_INVALID; else - clock_ratio[i].frequency = busfreq * f; + pos->frequency = busfreq * f; } /* cpuinfo and default policy values */ - policy->cpuinfo.transition_latency = 200000; - policy->cur = busfreq * max_multiplier; - - result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio); - if (result) - return result; + policy->cpuinfo.transition_latency = 500000; - cpufreq_frequency_table_get_attr(clock_ratio, policy->cpu); - - return 0; + return cpufreq_table_validate_and_show(policy, clock_ratio); } static int powernow_k6_cpu_exit(struct cpufreq_policy *policy) { unsigned int i; - for (i = 0; i < 8; i++) { - if (i == max_multiplier) - powernow_k6_set_state(policy, i); + + for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) { + if (clock_ratio[i].driver_data == max_multiplier) { + struct cpufreq_freqs freqs; + + freqs.old = policy->cur; + freqs.new = clock_ratio[i].frequency; + freqs.flags = 0; + + cpufreq_freq_transition_begin(policy, &freqs); + powernow_k6_target(policy, i); + cpufreq_freq_transition_end(policy, &freqs, 0); + break; + } } - cpufreq_frequency_table_put_attr(policy->cpu); return 0; } @@ -195,20 +244,14 @@ static unsigned int powernow_k6_get(unsigned int cpu) return ret; } -static struct freq_attr *powernow_k6_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver powernow_k6_driver = { - .verify = powernow_k6_verify, - .target = powernow_k6_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = powernow_k6_target, .init = powernow_k6_cpu_init, .exit = powernow_k6_cpu_exit, .get = powernow_k6_get, .name = "powernow-k6", - .owner = THIS_MODULE, - .attr = powernow_k6_attr, + .attr = cpufreq_generic_attr, }; static const struct x86_cpu_id powernow_k6_ids[] = { diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c index 53888dacbe5..e61e224475a 100644 --- a/drivers/cpufreq/powernow-k7.c +++ b/drivers/cpufreq/powernow-k7.c @@ -177,7 +177,7 @@ static int get_ranges(unsigned char *pst) unsigned int speed; u8 fid, vid; - powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) * + powernow_table = kzalloc((sizeof(*powernow_table) * (number_scales + 1)), GFP_KERNEL); if (!powernow_table) return -ENOMEM; @@ -186,7 +186,7 @@ static int get_ranges(unsigned char *pst) fid = *pst++; powernow_table[j].frequency = (fsb * fid_codes[fid]) / 10; - powernow_table[j].index = fid; /* lower 8 bits */ + powernow_table[j].driver_data = fid; /* lower 8 bits */ speed = powernow_table[j].frequency; @@ -203,7 +203,7 @@ static int get_ranges(unsigned char *pst) maximum_speed = speed; vid = *pst++; - powernow_table[j].index |= (vid << 8); /* upper 8 bits */ + powernow_table[j].driver_data |= (vid << 8); /* upper 8 bits */ pr_debug(" FID: 0x%x (%d.%dx [%dMHz]) " "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, @@ -212,7 +212,7 @@ static int get_ranges(unsigned char *pst) mobile_vid_table[vid]%1000); } powernow_table[number_scales].frequency = CPUFREQ_TABLE_END; - powernow_table[number_scales].index = 0; + powernow_table[number_scales].driver_data = 0; return 0; } @@ -248,7 +248,7 @@ static void change_VID(int vid) } -static void change_speed(struct 
cpufreq_policy *policy, unsigned int index) +static int powernow_target(struct cpufreq_policy *policy, unsigned int index) { u8 fid, vid; struct cpufreq_freqs freqs; @@ -260,8 +260,8 @@ static void change_speed(struct cpufreq_policy *policy, unsigned int index) * vid are the upper 8 bits. */ - fid = powernow_table[index].index & 0xFF; - vid = (powernow_table[index].index & 0xFF00) >> 8; + fid = powernow_table[index].driver_data & 0xFF; + vid = (powernow_table[index].driver_data & 0xFF00) >> 8; rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); cfid = fidvidstatus.bits.CFID; @@ -269,8 +269,6 @@ static void change_speed(struct cpufreq_policy *policy, unsigned int index) freqs.new = powernow_table[index].frequency; - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - /* Now do the magic poking into the MSRs. */ if (have_a0 == 1) /* A0 errata 5 */ @@ -290,7 +288,7 @@ static void change_speed(struct cpufreq_policy *policy, unsigned int index) if (have_a0 == 1) local_irq_enable(); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + return 0; } @@ -309,8 +307,7 @@ static int powernow_acpi_init(void) goto err0; } - acpi_processor_perf = kzalloc(sizeof(struct acpi_processor_performance), - GFP_KERNEL); + acpi_processor_perf = kzalloc(sizeof(*acpi_processor_perf), GFP_KERNEL); if (!acpi_processor_perf) { retval = -ENOMEM; goto err0; @@ -346,7 +343,7 @@ static int powernow_acpi_init(void) goto err2; } - powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) * + powernow_table = kzalloc((sizeof(*powernow_table) * (number_scales + 1)), GFP_KERNEL); if (!powernow_table) { retval = -ENOMEM; @@ -373,8 +370,8 @@ static int powernow_acpi_init(void) fid = pc.bits.fid; powernow_table[i].frequency = fsb * fid_codes[fid] / 10; - powernow_table[i].index = fid; /* lower 8 bits */ - powernow_table[i].index |= (vid << 8); /* upper 8 bits */ + powernow_table[i].driver_data = fid; /* lower 8 bits */ + powernow_table[i].driver_data |= (vid << 8); /* upper 8 bits */ speed = powernow_table[i].frequency; speed_mhz = speed / 1000; @@ -417,7 +414,7 @@ static int powernow_acpi_init(void) } powernow_table[i].frequency = CPUFREQ_TABLE_END; - powernow_table[i].index = 0; + powernow_table[i].driver_data = 0; /* notify BIOS that we exist */ acpi_processor_notify_smm(THIS_MODULE); @@ -497,7 +494,7 @@ static int powernow_decode_bios(int maxfid, int startvid) "relevant to this CPU).\n", psb->numpst); - p += sizeof(struct psb_s); + p += sizeof(*psb); pst = (struct pst_s *) p; @@ -510,12 +507,12 @@ static int powernow_decode_bios(int maxfid, int startvid) (maxfid == pst->maxfid) && (startvid == pst->startvid)) { print_pst_entry(pst, j); - p = (char *)pst + sizeof(struct pst_s); + p = (char *)pst + sizeof(*pst); ret = get_ranges(p); return ret; } else { unsigned int k; - p = (char *)pst + sizeof(struct pst_s); + p = (char *)pst + sizeof(*pst); for (k = 0; k < number_scales; k++) p += 2; } @@ -534,27 +531,6 @@ static int powernow_decode_bios(int maxfid, int startvid) } -static int powernow_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int newstate; - - if (cpufreq_frequency_table_target(policy, powernow_table, target_freq, - relation, &newstate)) - return -EINVAL; - - change_speed(policy, newstate); - - return 0; -} - - -static int powernow_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, powernow_table); -} - /* * We use the fact that the bus frequency is somehow * a multiple of 100000/3 khz, then we 
compute sgtc according @@ -563,7 +539,7 @@ static int powernow_verify(struct cpufreq_policy *policy) * We will then get the same kind of behaviour already tested under * the "well-known" other OS. */ -static int __cpuinit fixup_sgtc(void) +static int fixup_sgtc(void) { unsigned int sgtc; unsigned int m; @@ -597,7 +573,7 @@ static unsigned int powernow_get(unsigned int cpu) } -static int __cpuinit acer_cpufreq_pst(const struct dmi_system_id *d) +static int acer_cpufreq_pst(const struct dmi_system_id *d) { printk(KERN_WARNING PFX "%s laptop with broken PST tables in BIOS detected.\n", @@ -615,7 +591,7 @@ static int __cpuinit acer_cpufreq_pst(const struct dmi_system_id *d) * A BIOS update is all that can save them. * Mention this, and disable cpufreq. */ -static struct dmi_system_id __cpuinitdata powernow_dmi_table[] = { +static struct dmi_system_id powernow_dmi_table[] = { { .callback = acer_cpufreq_pst, .ident = "Acer Aspire", @@ -627,7 +603,7 @@ static struct dmi_system_id __cpuinitdata powernow_dmi_table[] = { { } }; -static int __cpuinit powernow_cpu_init(struct cpufreq_policy *policy) +static int powernow_cpu_init(struct cpufreq_policy *policy) { union msr_fidvidstatus fidvidstatus; int result; @@ -679,17 +655,11 @@ static int __cpuinit powernow_cpu_init(struct cpufreq_policy *policy) policy->cpuinfo.transition_latency = cpufreq_scale(2000000UL, fsb, latency); - policy->cur = powernow_get(0); - - cpufreq_frequency_table_get_attr(powernow_table, policy->cpu); - - return cpufreq_frequency_table_cpuinfo(policy, powernow_table); + return cpufreq_table_validate_and_show(policy, powernow_table); } static int powernow_cpu_exit(struct cpufreq_policy *policy) { - cpufreq_frequency_table_put_attr(policy->cpu); - #ifdef CONFIG_X86_POWERNOW_K7_ACPI if (acpi_processor_perf) { acpi_processor_unregister_performance(acpi_processor_perf, 0); @@ -702,14 +672,9 @@ static int powernow_cpu_exit(struct cpufreq_policy *policy) return 0; } -static struct freq_attr *powernow_table_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver powernow_driver = { - .verify = powernow_verify, - .target = powernow_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = powernow_target, .get = powernow_get, #ifdef CONFIG_X86_POWERNOW_K7_ACPI .bios_limit = acpi_processor_get_bios_limit, @@ -717,8 +682,7 @@ static struct cpufreq_driver powernow_driver = { .init = powernow_cpu_init, .exit = powernow_cpu_exit, .name = "powernow-k7", - .owner = THIS_MODULE, - .attr = powernow_table_attr, + .attr = cpufreq_generic_attr, }; static int __init powernow_init(void) diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index b828efe4b2f..f9ce7e4bf0f 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -27,6 +27,8 @@ * power and thermal data sheets, (e.g. 
30417.pdf, 30430.pdf, 43375.pdf) */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/smp.h> #include <linux/module.h> @@ -45,7 +47,6 @@ #include <linux/mutex.h> #include <acpi/processor.h> -#define PFX "powernow-k8: " #define VERSION "version 2.20.00" #include "powernow-k8.h" @@ -161,7 +162,7 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid) u32 i = 0; if ((fid & INVALID_FID_MASK) || (data->currvid & INVALID_VID_MASK)) { - printk(KERN_ERR PFX "internal error - overflow on fid write\n"); + pr_err("internal error - overflow on fid write\n"); return 1; } @@ -175,9 +176,7 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid) do { wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION); if (i++ > 100) { - printk(KERN_ERR PFX - "Hardware error - pending bit very stuck - " - "no further pstate changes possible\n"); + pr_err("Hardware error - pending bit very stuck - no further pstate changes possible\n"); return 1; } } while (query_current_values_with_pending_wait(data)); @@ -185,15 +184,13 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid) count_off_irt(data); if (savevid != data->currvid) { - printk(KERN_ERR PFX - "vid change on fid trans, old 0x%x, new 0x%x\n", - savevid, data->currvid); + pr_err("vid change on fid trans, old 0x%x, new 0x%x\n", + savevid, data->currvid); return 1; } if (fid != data->currfid) { - printk(KERN_ERR PFX - "fid trans failed, fid 0x%x, curr 0x%x\n", fid, + pr_err("fid trans failed, fid 0x%x, curr 0x%x\n", fid, data->currfid); return 1; } @@ -209,7 +206,7 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid) int i = 0; if ((data->currfid & INVALID_FID_MASK) || (vid & INVALID_VID_MASK)) { - printk(KERN_ERR PFX "internal error - overflow on vid write\n"); + pr_err("internal error - overflow on vid write\n"); return 1; } @@ -223,23 +220,19 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid) do { wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS); if (i++ > 100) { - printk(KERN_ERR PFX "internal error - pending bit " - "very stuck - no further pstate " - "changes possible\n"); + pr_err("internal error - pending bit very stuck - no further pstate changes possible\n"); return 1; } } while (query_current_values_with_pending_wait(data)); if (savefid != data->currfid) { - printk(KERN_ERR PFX "fid changed on vid trans, old " - "0x%x new 0x%x\n", - savefid, data->currfid); + pr_err("fid changed on vid trans, old 0x%x new 0x%x\n", + savefid, data->currfid); return 1; } if (vid != data->currvid) { - printk(KERN_ERR PFX "vid trans failed, vid 0x%x, " - "curr 0x%x\n", + pr_err("vid trans failed, vid 0x%x, curr 0x%x\n", vid, data->currvid); return 1; } @@ -283,8 +276,7 @@ static int transition_fid_vid(struct powernow_k8_data *data, return 1; if ((reqfid != data->currfid) || (reqvid != data->currvid)) { - printk(KERN_ERR PFX "failed (cpu%d): req 0x%x 0x%x, " - "curr 0x%x 0x%x\n", + pr_err("failed (cpu%d): req 0x%x 0x%x, curr 0x%x 0x%x\n", smp_processor_id(), reqfid, reqvid, data->currfid, data->currvid); return 1; @@ -304,8 +296,7 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 savefid = data->currfid; u32 maxvid, lo, rvomult = 1; - pr_debug("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, " - "reqvid 0x%x, rvo 0x%x\n", + pr_debug("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, reqvid 0x%x, rvo 0x%x\n", smp_processor_id(), data->currfid, data->currvid, reqvid, data->rvo); @@ -342,8 +333,7 @@ static int core_voltage_pre_transition(struct 
powernow_k8_data *data, return 1; if (savefid != data->currfid) { - printk(KERN_ERR PFX "ph1 err, currfid changed 0x%x\n", - data->currfid); + pr_err("ph1 err, currfid changed 0x%x\n", data->currfid); return 1; } @@ -360,13 +350,11 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) u32 fid_interval, savevid = data->currvid; if (data->currfid == reqfid) { - printk(KERN_ERR PFX "ph2 null fid transition 0x%x\n", - data->currfid); + pr_err("ph2 null fid transition 0x%x\n", data->currfid); return 0; } - pr_debug("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, " - "reqfid 0x%x\n", + pr_debug("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, reqfid 0x%x\n", smp_processor_id(), data->currfid, data->currvid, reqfid); @@ -409,15 +397,13 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) return 1; if (data->currfid != reqfid) { - printk(KERN_ERR PFX - "ph2: mismatch, failed fid transition, " - "curr 0x%x, req 0x%x\n", + pr_err("ph2: mismatch, failed fid transition, curr 0x%x, req 0x%x\n", data->currfid, reqfid); return 1; } if (savevid != data->currvid) { - printk(KERN_ERR PFX "ph2: vid changed, save 0x%x, curr 0x%x\n", + pr_err("ph2: vid changed, save 0x%x, curr 0x%x\n", savevid, data->currvid); return 1; } @@ -444,17 +430,14 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, return 1; if (savefid != data->currfid) { - printk(KERN_ERR PFX - "ph3: bad fid change, save 0x%x, curr 0x%x\n", - savefid, data->currfid); + pr_err("ph3: bad fid change, save 0x%x, curr 0x%x\n", + savefid, data->currfid); return 1; } if (data->currvid != reqvid) { - printk(KERN_ERR PFX - "ph3: failed vid transition\n, " - "req 0x%x, curr 0x%x", - reqvid, data->currvid); + pr_err("ph3: failed vid transition, req 0x%x, curr 0x%x\n", + reqvid, data->currvid); return 1; } } @@ -498,23 +481,20 @@ static void check_supported_cpu(void *_rc) if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) { if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) || ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) { - printk(KERN_INFO PFX - "Processor cpuid %x not supported\n", eax); + pr_info("Processor cpuid %x not supported\n", eax); return; } eax = cpuid_eax(CPUID_GET_MAX_CAPABILITIES); if (eax < CPUID_FREQ_VOLT_CAPABILITIES) { - printk(KERN_INFO PFX - "No frequency change capabilities detected\n"); + pr_info("No frequency change capabilities detected\n"); return; } cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); if ((edx & P_STATE_TRANSITION_CAPABLE) != P_STATE_TRANSITION_CAPABLE) { - printk(KERN_INFO PFX - "Power state transitions not supported\n"); + pr_info("Power state transitions not supported\n"); return; } *rc = 0; @@ -529,43 +509,39 @@ static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, for (j = 0; j < data->numps; j++) { if (pst[j].vid > LEAST_VID) { - printk(KERN_ERR FW_BUG PFX "vid %d invalid : 0x%x\n", - j, pst[j].vid); + pr_err(FW_BUG "vid %d invalid : 0x%x\n", j, + pst[j].vid); return -EINVAL; } if (pst[j].vid < data->rvo) { /* vid + rvo >= 0 */ - printk(KERN_ERR FW_BUG PFX "0 vid exceeded with pstate" - " %d\n", j); + pr_err(FW_BUG "0 vid exceeded with pstate %d\n", j); return -ENODEV; } if (pst[j].vid < maxvid + data->rvo) { /* vid + rvo >= maxvid */ - printk(KERN_ERR FW_BUG PFX "maxvid exceeded with pstate" - " %d\n", j); + pr_err(FW_BUG "maxvid exceeded with pstate %d\n", j); return -ENODEV; } if (pst[j].fid > MAX_FID) { - printk(KERN_ERR FW_BUG PFX "maxfid exceeded with pstate" - " %d\n", j); + pr_err(FW_BUG
"maxfid exceeded with pstate %d\n", j); return -ENODEV; } if (j && (pst[j].fid < HI_FID_TABLE_BOTTOM)) { /* Only first fid is allowed to be in "low" range */ - printk(KERN_ERR FW_BUG PFX "two low fids - %d : " - "0x%x\n", j, pst[j].fid); + pr_err(FW_BUG "two low fids - %d : 0x%x\n", j, + pst[j].fid); return -EINVAL; } if (pst[j].fid < lastfid) lastfid = pst[j].fid; } if (lastfid & 1) { - printk(KERN_ERR FW_BUG PFX "lastfid invalid\n"); + pr_err(FW_BUG "lastfid invalid\n"); return -EINVAL; } if (lastfid > LO_FID_TABLE_TOP) - printk(KERN_INFO FW_BUG PFX - "first fid not from lo freq table\n"); + pr_info(FW_BUG "first fid not from lo freq table\n"); return 0; } @@ -582,16 +558,14 @@ static void print_basics(struct powernow_k8_data *data) for (j = 0; j < data->numps; j++) { if (data->powernow_table[j].frequency != CPUFREQ_ENTRY_INVALID) { - printk(KERN_INFO PFX - "fid 0x%x (%d MHz), vid 0x%x\n", - data->powernow_table[j].index & 0xff, - data->powernow_table[j].frequency/1000, - data->powernow_table[j].index >> 8); + pr_info("fid 0x%x (%d MHz), vid 0x%x\n", + data->powernow_table[j].driver_data & 0xff, + data->powernow_table[j].frequency/1000, + data->powernow_table[j].driver_data >> 8); } } if (data->batps) - printk(KERN_INFO PFX "Only %d pstates on battery\n", - data->batps); + pr_info("Only %d pstates on battery\n", data->batps); } static int fill_powernow_table(struct powernow_k8_data *data, @@ -602,43 +576,42 @@ static int fill_powernow_table(struct powernow_k8_data *data, if (data->batps) { /* use ACPI support to get full speed on mains power */ - printk(KERN_WARNING PFX - "Only %d pstates usable (use ACPI driver for full " - "range\n", data->batps); + pr_warn("Only %d pstates usable (use ACPI driver for full range\n", + data->batps); data->numps = data->batps; } for (j = 1; j < data->numps; j++) { if (pst[j-1].fid >= pst[j].fid) { - printk(KERN_ERR PFX "PST out of sequence\n"); + pr_err("PST out of sequence\n"); return -EINVAL; } } if (data->numps < 2) { - printk(KERN_ERR PFX "no p states to transition\n"); + pr_err("no p states to transition\n"); return -ENODEV; } if (check_pst_table(data, pst, maxvid)) return -EINVAL; - powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) + powernow_table = kzalloc((sizeof(*powernow_table) * (data->numps + 1)), GFP_KERNEL); if (!powernow_table) { - printk(KERN_ERR PFX "powernow_table memory alloc failure\n"); + pr_err("powernow_table memory alloc failure\n"); return -ENOMEM; } for (j = 0; j < data->numps; j++) { int freq; - powernow_table[j].index = pst[j].fid; /* lower 8 bits */ - powernow_table[j].index |= (pst[j].vid << 8); /* upper 8 bits */ + powernow_table[j].driver_data = pst[j].fid; /* lower 8 bits */ + powernow_table[j].driver_data |= (pst[j].vid << 8); /* upper 8 bits */ freq = find_khz_freq_from_fid(pst[j].fid); powernow_table[j].frequency = freq; } powernow_table[data->numps].frequency = CPUFREQ_TABLE_END; - powernow_table[data->numps].index = 0; + powernow_table[data->numps].driver_data = 0; if (query_current_values_with_pending_wait(data)) { kfree(powernow_table); @@ -681,13 +654,13 @@ static int find_psb_table(struct powernow_k8_data *data) pr_debug("table vers: 0x%x\n", psb->tableversion); if (psb->tableversion != PSB_VERSION_1_4) { - printk(KERN_ERR FW_BUG PFX "PSB table is not v1.4\n"); + pr_err(FW_BUG "PSB table is not v1.4\n"); return -ENODEV; } pr_debug("flags: 0x%x\n", psb->flags1); if (psb->flags1) { - printk(KERN_ERR FW_BUG PFX "unknown flags\n"); + pr_err(FW_BUG "unknown flags\n"); return -ENODEV; } @@ -716,7 +689,7 
@@ static int find_psb_table(struct powernow_k8_data *data) cpst = 1; } if (cpst != 1) { - printk(KERN_ERR FW_BUG PFX "numpst must be 1\n"); + pr_err(FW_BUG "numpst must be 1\n"); return -ENODEV; } @@ -742,9 +715,8 @@ static int find_psb_table(struct powernow_k8_data *data) * BIOS and Kernel Developer's Guide, which is available on * www.amd.com */ - printk(KERN_ERR FW_BUG PFX "No PSB or ACPI _PSS objects\n"); - printk(KERN_ERR PFX "Make sure that your BIOS is up to date" - " and Cool'N'Quiet support is enabled in BIOS setup\n"); + pr_err(FW_BUG "No PSB or ACPI _PSS objects\n"); + pr_err("Make sure that your BIOS is up to date and Cool'N'Quiet support is enabled in BIOS setup\n"); return -ENODEV; } @@ -793,7 +765,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) } /* fill in data->powernow_table */ - powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) + powernow_table = kzalloc((sizeof(*powernow_table) * (data->acpi_data.state_count + 1)), GFP_KERNEL); if (!powernow_table) { pr_debug("powernow_table memory alloc failure\n"); @@ -810,7 +782,6 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) powernow_table[data->acpi_data.state_count].frequency = CPUFREQ_TABLE_END; - powernow_table[data->acpi_data.state_count].index = 0; data->powernow_table = powernow_table; if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu) @@ -820,8 +791,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) acpi_processor_notify_smm(THIS_MODULE); if (!zalloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) { - printk(KERN_ERR PFX - "unable to alloc powernow_k8_data cpumask\n"); + pr_err("unable to alloc powernow_k8_data cpumask\n"); ret_val = -ENOMEM; goto err_out_mem; } @@ -865,7 +835,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, pr_debug(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); index = fid | (vid<<8); - powernow_table[i].index = index; + powernow_table[i].driver_data = index; freq = find_khz_freq_from_fid(fid); powernow_table[i].frequency = freq; @@ -886,9 +856,8 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, } if (freq != (data->acpi_data.states[i].core_frequency * 1000)) { - printk(KERN_INFO PFX "invalid freq entries " - "%u kHz vs. %u kHz\n", freq, - (unsigned int) + pr_info("invalid freq entries %u kHz vs. %u kHz\n", + freq, (unsigned int) (data->acpi_data.states[i].core_frequency * 1000)); invalidate_entry(powernow_table, i); @@ -917,7 +886,7 @@ static int get_transition_latency(struct powernow_k8_data *data) max_latency = cur_latency; } if (max_latency == 0) { - pr_err(FW_WARN PFX "Invalid zero transition latency\n"); + pr_err(FW_WARN "Invalid zero transition latency\n"); max_latency = 1; } /* value in usecs, needs to be in nanoseconds */ @@ -941,8 +910,8 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, * the cpufreq frequency table in find_psb_table, vid * are the upper 8 bits. 
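+ * (driver_data = fid | (vid << 8); the masking and shifting below + * recover them).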
*/ - fid = data->powernow_table[index].index & 0xFF; - vid = (data->powernow_table[index].index & 0xFF00) >> 8; + fid = data->powernow_table[index].driver_data & 0xFF; + vid = (data->powernow_table[index].driver_data & 0xFF00) >> 8; pr_debug("table matched fid 0x%x, giving vid 0x%x\n", fid, vid); @@ -963,34 +932,26 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, policy = cpufreq_cpu_get(smp_processor_id()); cpufreq_cpu_put(policy); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - + cpufreq_freq_transition_begin(policy, &freqs); res = transition_fid_vid(data, fid, vid); - if (res) - return res; + cpufreq_freq_transition_end(policy, &freqs, res); - freqs.new = find_khz_freq_from_fid(data->currfid); - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return res; } struct powernowk8_target_arg { struct cpufreq_policy *pol; - unsigned targfreq; - unsigned relation; + unsigned newstate; }; static long powernowk8_target_fn(void *arg) { struct powernowk8_target_arg *pta = arg; struct cpufreq_policy *pol = pta->pol; - unsigned targfreq = pta->targfreq; - unsigned relation = pta->relation; + unsigned newstate = pta->newstate; struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); u32 checkfid; u32 checkvid; - unsigned int newstate; int ret; if (!data) @@ -1000,31 +961,27 @@ static long powernowk8_target_fn(void *arg) checkvid = data->currvid; if (pending_bit_stuck()) { - printk(KERN_ERR PFX "failing targ, change pending bit set\n"); + pr_err("failing targ, change pending bit set\n"); return -EIO; } - pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n", - pol->cpu, targfreq, pol->min, pol->max, relation); + pr_debug("targ: cpu %d, %d kHz, min %d, max %d\n", + pol->cpu, data->powernow_table[newstate].frequency, pol->min, + pol->max); if (query_current_values_with_pending_wait(data)) return -EIO; pr_debug("targ: curr fid 0x%x, vid 0x%x\n", - data->currfid, data->currvid); + data->currfid, data->currvid); if ((checkvid != data->currvid) || (checkfid != data->currfid)) { - pr_info(PFX - "error - out of sync, fix 0x%x 0x%x, vid 0x%x 0x%x\n", + pr_info("error - out of sync, fix 0x%x 0x%x, vid 0x%x 0x%x\n", checkfid, data->currfid, checkvid, data->currvid); } - if (cpufreq_frequency_table_target(pol, data->powernow_table, - targfreq, relation, &newstate)) - return -EIO; - mutex_lock(&fidvid_mutex); powernow_k8_acpi_pst_values(data, newstate); @@ -1032,7 +989,7 @@ static long powernowk8_target_fn(void *arg) ret = transition_frequency_fidvid(data, newstate); if (ret) { - printk(KERN_ERR PFX "transition frequency failed\n"); + pr_err("transition frequency failed\n"); mutex_unlock(&fidvid_mutex); return 1; } @@ -1044,37 +1001,24 @@ static long powernowk8_target_fn(void *arg) } /* Driver entry point to switch to the target frequency */ -static int powernowk8_target(struct cpufreq_policy *pol, - unsigned targfreq, unsigned relation) +static int powernowk8_target(struct cpufreq_policy *pol, unsigned index) { - struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq, - .relation = relation }; + struct powernowk8_target_arg pta = { .pol = pol, .newstate = index }; return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta); } -/* Driver entry point to verify the policy and range of frequencies */ -static int powernowk8_verify(struct cpufreq_policy *pol) -{ - struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); - - if (!data) - return -EINVAL; - - return cpufreq_frequency_table_verify(pol, data->powernow_table); -} 
- struct init_on_cpu { struct powernow_k8_data *data; int rc; }; -static void __cpuinit powernowk8_cpu_init_on_cpu(void *_init_on_cpu) +static void powernowk8_cpu_init_on_cpu(void *_init_on_cpu) { struct init_on_cpu *init_on_cpu = _init_on_cpu; if (pending_bit_stuck()) { - printk(KERN_ERR PFX "failing init, change pending bit set\n"); + pr_err("failing init, change pending bit set\n"); init_on_cpu->rc = -ENODEV; return; } @@ -1089,26 +1033,25 @@ static void __cpuinit powernowk8_cpu_init_on_cpu(void *_init_on_cpu) init_on_cpu->rc = 0; } -static const char missing_pss_msg[] = - KERN_ERR - FW_BUG PFX "No compatible ACPI _PSS objects found.\n" - FW_BUG PFX "First, make sure Cool'N'Quiet is enabled in the BIOS.\n" - FW_BUG PFX "If that doesn't help, try upgrading your BIOS.\n"; +#define MISSING_PSS_MSG \ + FW_BUG "No compatible ACPI _PSS objects found.\n" \ + FW_BUG "First, make sure Cool'N'Quiet is enabled in the BIOS.\n" \ + FW_BUG "If that doesn't help, try upgrading your BIOS.\n" /* per CPU init entry point to the driver */ -static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) +static int powernowk8_cpu_init(struct cpufreq_policy *pol) { struct powernow_k8_data *data; struct init_on_cpu init_on_cpu; - int rc; + int rc, cpu; smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1); if (rc) return -ENODEV; - data = kzalloc(sizeof(struct powernow_k8_data), GFP_KERNEL); + data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) { - printk(KERN_ERR PFX "unable to alloc powernow_k8_data"); + pr_err("unable to alloc powernow_k8_data"); return -ENOMEM; } @@ -1120,13 +1063,11 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) * an UP version, and is deprecated by AMD. */ if (num_online_cpus() != 1) { - printk_once(missing_pss_msg); + pr_err_once(MISSING_PSS_MSG); goto err_out; } if (pol->cpu != 0) { - printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for " - "CPU other than CPU0. Complain to your BIOS " - "vendor.\n"); + pr_err(FW_BUG "No ACPI _PSS objects for CPU other than CPU0. 
Complain to your BIOS vendor.\n"); goto err_out; } rc = find_psb_table(data); @@ -1152,24 +1093,21 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu)); data->available_cores = pol->cpus; - pol->cur = find_khz_freq_from_fid(data->currfid); - pr_debug("policy current frequency %d kHz\n", pol->cur); - /* min/max the cpu is capable of */ - if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) { - printk(KERN_ERR FW_BUG PFX "invalid powernow_table\n"); + if (cpufreq_table_validate_and_show(pol, data->powernow_table)) { + pr_err(FW_BUG "invalid powernow_table\n"); powernow_k8_cpu_exit_acpi(data); kfree(data->powernow_table); kfree(data); return -EINVAL; } - cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu); - pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n", - data->currfid, data->currvid); + data->currfid, data->currvid); - per_cpu(powernow_data, pol->cpu) = data; + /* Point all the CPUs in this policy to the same data */ + for_each_cpu(cpu, pol->cpus) + per_cpu(powernow_data, cpu) = data; return 0; @@ -1184,17 +1122,17 @@ err_out: static int powernowk8_cpu_exit(struct cpufreq_policy *pol) { struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); + int cpu; if (!data) return -EINVAL; powernow_k8_cpu_exit_acpi(data); - cpufreq_frequency_table_put_attr(pol->cpu); - kfree(data->powernow_table); kfree(data); - per_cpu(powernow_data, pol->cpu) = NULL; + for_each_cpu(cpu, pol->cpus) + per_cpu(powernow_data, cpu) = NULL; return 0; } @@ -1227,21 +1165,16 @@ out: return khz; } -static struct freq_attr *powernow_k8_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver cpufreq_amd64_driver = { - .verify = powernowk8_verify, - .target = powernowk8_target, + .flags = CPUFREQ_ASYNC_NOTIFICATION, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = powernowk8_target, .bios_limit = acpi_processor_get_bios_limit, .init = powernowk8_cpu_init, .exit = powernowk8_cpu_exit, .get = powernowk8_get, .name = "powernow-k8", - .owner = THIS_MODULE, - .attr = powernow_k8_attr, + .attr = cpufreq_generic_attr, }; static void __request_acpi_cpufreq(void) @@ -1253,17 +1186,17 @@ static void __request_acpi_cpufreq(void) goto request; if (strncmp(cur_drv, drv, min_t(size_t, strlen(cur_drv), strlen(drv)))) - pr_warn(PFX "WTF driver: %s\n", cur_drv); + pr_warn("WTF driver: %s\n", cur_drv); return; request: - pr_warn(PFX "This CPU is not supported anymore, using acpi-cpufreq instead.\n"); + pr_warn("This CPU is not supported anymore, using acpi-cpufreq instead.\n"); request_module(drv); } /* driver entry point for init */ -static int __cpuinit powernowk8_init(void) +static int powernowk8_init(void) { unsigned int i, supported_cpus = 0; int ret; @@ -1293,7 +1226,7 @@ static int __cpuinit powernowk8_init(void) if (ret) return ret; - pr_info(PFX "Found %d %s (%d cpu cores) (" VERSION ")\n", + pr_info("Found %d %s (%d cpu cores) (" VERSION ")\n", num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus); return ret; @@ -1307,8 +1240,8 @@ static void __exit powernowk8_exit(void) cpufreq_unregister_driver(&cpufreq_amd64_driver); } -MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and " - "Mark Langsdorf <mark.langsdorf@amd.com>"); +MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com>"); +MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@amd.com>"); MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver."); MODULE_LICENSE("GPL"); diff --git 
a/drivers/cpufreq/powernow-k8.h b/drivers/cpufreq/powernow-k8.h index 79329d4d5ab..45ce11e8662 100644 --- a/drivers/cpufreq/powernow-k8.h +++ b/drivers/cpufreq/powernow-k8.h @@ -19,7 +19,7 @@ struct powernow_k8_data { u32 vidmvs; /* usable value calculated from mvs */ u32 vstable; /* voltage stabilization time, units 20 us */ u32 plllock; /* pll lock time, units 1 us */ - u32 exttype; /* extended interface = 1 */ + u32 exttype; /* extended interface = 1 */ /* keep track of the current fid / vid or pstate */ u32 currvid; diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c new file mode 100644 index 00000000000..bb1d08dc8cc --- /dev/null +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -0,0 +1,342 @@ +/* + * POWERNV cpufreq driver for the IBM POWER processors + * + * (C) Copyright IBM 2014 + * + * Author: Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) "powernv-cpufreq: " fmt + +#include <linux/kernel.h> +#include <linux/sysfs.h> +#include <linux/cpumask.h> +#include <linux/module.h> +#include <linux/cpufreq.h> +#include <linux/smp.h> +#include <linux/of.h> + +#include <asm/cputhreads.h> +#include <asm/reg.h> +#include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */ + +#define POWERNV_MAX_PSTATES 256 + +static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1]; + +/* + * Note: The set of pstates consists of contiguous integers, the + * smallest of which is indicated by powernv_pstate_info.min, the + * largest of which is indicated by powernv_pstate_info.max. + * + * The nominal pstate is the highest non-turbo pstate in this + * platform. This is indicated by powernv_pstate_info.nominal. 
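+ * + * The driver assumes firmware lists pstates from the highest id + * downwards, so the table entry for a given id sits at index + * (powernv_pstate_info.max - id); see pstate_id_to_freq().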
+ */ +static struct powernv_pstate_info { + int min; + int max; + int nominal; + int nr_pstates; +} powernv_pstate_info; + +/* + * Initialize the freq table based on data obtained + * from the firmware passed via device-tree + */ +static int init_powernv_pstates(void) +{ + struct device_node *power_mgt; + int i, pstate_min, pstate_max, pstate_nominal, nr_pstates = 0; + const __be32 *pstate_ids, *pstate_freqs; + u32 len_ids, len_freqs; + + power_mgt = of_find_node_by_path("/ibm,opal/power-mgt"); + if (!power_mgt) { + pr_warn("power-mgt node not found\n"); + return -ENODEV; + } + + if (of_property_read_u32(power_mgt, "ibm,pstate-min", &pstate_min)) { + pr_warn("ibm,pstate-min node not found\n"); + return -ENODEV; + } + + if (of_property_read_u32(power_mgt, "ibm,pstate-max", &pstate_max)) { + pr_warn("ibm,pstate-max node not found\n"); + return -ENODEV; + } + + if (of_property_read_u32(power_mgt, "ibm,pstate-nominal", + &pstate_nominal)) { + pr_warn("ibm,pstate-nominal not found\n"); + return -ENODEV; + } + pr_info("cpufreq pstate min %d nominal %d max %d\n", pstate_min, + pstate_nominal, pstate_max); + + pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids); + if (!pstate_ids) { + pr_warn("ibm,pstate-ids not found\n"); + return -ENODEV; + } + + pstate_freqs = of_get_property(power_mgt, "ibm,pstate-frequencies-mhz", + &len_freqs); + if (!pstate_freqs) { + pr_warn("ibm,pstate-frequencies-mhz not found\n"); + return -ENODEV; + } + + WARN_ON(len_ids != len_freqs); + nr_pstates = min(len_ids, len_freqs) / sizeof(u32); + if (!nr_pstates) { + pr_warn("No PStates found\n"); + return -ENODEV; + } + + pr_debug("NR PStates %d\n", nr_pstates); + for (i = 0; i < nr_pstates; i++) { + u32 id = be32_to_cpu(pstate_ids[i]); + u32 freq = be32_to_cpu(pstate_freqs[i]); + + pr_debug("PState id %d freq %d MHz\n", id, freq); + powernv_freqs[i].frequency = freq * 1000; /* kHz */ + powernv_freqs[i].driver_data = id; + } + /* End of list marker entry */ + powernv_freqs[i].frequency = CPUFREQ_TABLE_END; + + powernv_pstate_info.min = pstate_min; + powernv_pstate_info.max = pstate_max; + powernv_pstate_info.nominal = pstate_nominal; + powernv_pstate_info.nr_pstates = nr_pstates; + + return 0; +} + +/* Returns the CPU frequency corresponding to the pstate_id. 
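+ * An id outside [min..max] would index outside powernv_freqs[]; the + * BUG_ON below turns that into a hard failure rather than returning + * a bogus frequency.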
*/ +static unsigned int pstate_id_to_freq(int pstate_id) +{ + int i; + + i = powernv_pstate_info.max - pstate_id; + BUG_ON(i >= powernv_pstate_info.nr_pstates || i < 0); + + return powernv_freqs[i].frequency; +} + +/* + * cpuinfo_nominal_freq_show - Show the nominal CPU frequency as indicated by + * the firmware + */ +static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy, + char *buf) +{ + return sprintf(buf, "%u\n", + pstate_id_to_freq(powernv_pstate_info.nominal)); +} + +struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq = + __ATTR_RO(cpuinfo_nominal_freq); + +static struct freq_attr *powernv_cpu_freq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + &cpufreq_freq_attr_cpuinfo_nominal_freq, + NULL, +}; + +/* Helper routines */ + +/* Access helpers to power mgt SPR */ + +static inline unsigned long get_pmspr(unsigned long sprn) +{ + switch (sprn) { + case SPRN_PMCR: + return mfspr(SPRN_PMCR); + + case SPRN_PMICR: + return mfspr(SPRN_PMICR); + + case SPRN_PMSR: + return mfspr(SPRN_PMSR); + } + BUG(); +} + +static inline void set_pmspr(unsigned long sprn, unsigned long val) +{ + switch (sprn) { + case SPRN_PMCR: + mtspr(SPRN_PMCR, val); + return; + + case SPRN_PMICR: + mtspr(SPRN_PMICR, val); + return; + } + BUG(); +} + +/* + * Use objects of this type to query/update + * pstates on a remote CPU via smp_call_function. + */ +struct powernv_smp_call_data { + unsigned int freq; + int pstate_id; +}; + +/* + * powernv_read_cpu_freq: Reads the current frequency on this CPU. + * + * Called via smp_call_function. + * + * Note: The caller of the smp_call_function should pass an argument of + * the type 'struct powernv_smp_call_data *' along with this function. + * + * The current frequency on this CPU will be returned via + * ((struct powernv_smp_call_data *)arg)->freq; + */ +static void powernv_read_cpu_freq(void *arg) +{ + unsigned long pmspr_val; + s8 local_pstate_id; + struct powernv_smp_call_data *freq_data = arg; + + pmspr_val = get_pmspr(SPRN_PMSR); + + /* + * The local pstate id corresponds to bits 48..55 in the PMSR. + * Note: Watch out for the sign! + */ + local_pstate_id = (pmspr_val >> 48) & 0xFF; + freq_data->pstate_id = local_pstate_id; + freq_data->freq = pstate_id_to_freq(freq_data->pstate_id); + + pr_debug("cpu %d pmsr %016lX pstate_id %d frequency %d kHz\n", + raw_smp_processor_id(), pmspr_val, freq_data->pstate_id, + freq_data->freq); +} + +/* + * powernv_cpufreq_get: Returns the CPU frequency as reported by the + * firmware for CPU 'cpu'. This value is reported through the sysfs + * file cpuinfo_cur_freq. + */ +static unsigned int powernv_cpufreq_get(unsigned int cpu) +{ + struct powernv_smp_call_data freq_data; + + smp_call_function_any(cpu_sibling_mask(cpu), powernv_read_cpu_freq, + &freq_data, 1); + + return freq_data.freq; +} + +/* + * set_pstate: Sets the pstate on this CPU. + * + * This is called via an smp_call_function. + * + * The caller must ensure that freq_data is of the type + * (struct powernv_smp_call_data *) and the pstate_id which needs to be set + * on this CPU should be present in freq_data->pstate_id.
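+ * + * Pstate ids may be negative (note the s8 handling in + * powernv_read_cpu_freq()), so only the low byte is written, into + * both the global and local pstate fields of the PMCR.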
+ */ +static void set_pstate(void *freq_data) +{ + unsigned long val; + unsigned long pstate_ul = + ((struct powernv_smp_call_data *) freq_data)->pstate_id; + + val = get_pmspr(SPRN_PMCR); + val = val & 0x0000FFFFFFFFFFFFULL; + + pstate_ul = pstate_ul & 0xFF; + + /* Set both global(bits 56..63) and local(bits 48..55) PStates */ + val = val | (pstate_ul << 56) | (pstate_ul << 48); + + pr_debug("Setting cpu %d pmcr to %016lX\n", + raw_smp_processor_id(), val); + set_pmspr(SPRN_PMCR, val); +} + +/* + * powernv_cpufreq_target_index: Sets the frequency corresponding to + * the cpufreq table entry indexed by new_index on the cpus in the + * mask policy->cpus + */ +static int powernv_cpufreq_target_index(struct cpufreq_policy *policy, + unsigned int new_index) +{ + struct powernv_smp_call_data freq_data; + + freq_data.pstate_id = powernv_freqs[new_index].driver_data; + + /* + * Use smp_call_function to send IPI and execute the + * mtspr on target CPU. We could do that without IPI + * if current CPU is within policy->cpus (core) + */ + smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1); + + return 0; +} + +static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + int base, i; + + base = cpu_first_thread_sibling(policy->cpu); + + for (i = 0; i < threads_per_core; i++) + cpumask_set_cpu(base + i, policy->cpus); + + return cpufreq_table_validate_and_show(policy, powernv_freqs); +} + +static struct cpufreq_driver powernv_cpufreq_driver = { + .name = "powernv-cpufreq", + .flags = CPUFREQ_CONST_LOOPS, + .init = powernv_cpufreq_cpu_init, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = powernv_cpufreq_target_index, + .get = powernv_cpufreq_get, + .attr = powernv_cpu_freq_attr, +}; + +static int __init powernv_cpufreq_init(void) +{ + int rc = 0; + + /* Discover pstates from device tree and init */ + rc = init_powernv_pstates(); + if (rc) { + pr_info("powernv-cpufreq disabled. System does not support PState control\n"); + return rc; + } + + return cpufreq_register_driver(&powernv_cpufreq_driver); +} +module_init(powernv_cpufreq_init); + +static void __exit powernv_cpufreq_exit(void) +{ + cpufreq_unregister_driver(&powernv_cpufreq_driver); +} +module_exit(powernv_cpufreq_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>"); diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c new file mode 100644 index 00000000000..3607070797a --- /dev/null +++ b/drivers/cpufreq/ppc-corenet-cpufreq.c @@ -0,0 +1,334 @@ +/* + * Copyright 2013 Freescale Semiconductor, Inc. + * + * CPU Frequency Scaling driver for Freescale PowerPC corenet SoCs. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/clk.h> +#include <linux/cpufreq.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/slab.h> +#include <linux/smp.h> +#include <sysdev/fsl_soc.h> + +/** + * struct cpu_data - per CPU data struct + * @parent: the parent node of cpu clock + * @table: frequency table + */ +struct cpu_data { + struct device_node *parent; + struct cpufreq_frequency_table *table; +}; + +/** + * struct soc_data - SoC specific data + * @freq_mask: mask the disallowed frequencies + * @flag: unique flags + */ +struct soc_data { + u32 freq_mask[4]; + u32 flag; +}; + +#define FREQ_MASK 1 +/* see hardware specification for the allowed frequencies */ +static const struct soc_data sdata[] = { + { /* used by p2041 and p3041 */ + .freq_mask = {0x8, 0x8, 0x2, 0x2}, + .flag = FREQ_MASK, + }, + { /* used by p5020 */ + .freq_mask = {0x8, 0x2}, + .flag = FREQ_MASK, + }, + { /* used by p4080, p5040 */ + .freq_mask = {0}, + .flag = 0, + }, +}; + +/* + * the minimum allowed core frequency, in Hz + * for chassis v1.0, >= platform frequency + * for chassis v2.0, >= platform frequency / 2 + */ +static u32 min_cpufreq; +static const u32 *fmask; + +static DEFINE_PER_CPU(struct cpu_data *, cpu_data); + +/* cpumask in a cluster */ +static DEFINE_PER_CPU(cpumask_var_t, cpu_mask); + +#ifndef CONFIG_SMP +static inline const struct cpumask *cpu_core_mask(int cpu) +{ + return cpumask_of(0); +} +#endif + +/* mark duplicated frequencies in the frequency table as invalid */ +static void freq_table_redup(struct cpufreq_frequency_table *freq_table, + int count) +{ + int i, j; + + for (i = 1; i < count; i++) { + for (j = 0; j < i; j++) { + if (freq_table[j].frequency == CPUFREQ_ENTRY_INVALID || + freq_table[j].frequency != + freq_table[i].frequency) + continue; + + freq_table[i].frequency = CPUFREQ_ENTRY_INVALID; + break; + } + } +} + +/* sort the frequencies in the frequency table in descending order */ +static void freq_table_sort(struct cpufreq_frequency_table *freq_table, + int count) +{ + int i, j, ind; + unsigned int freq, max_freq; + struct cpufreq_frequency_table table; + for (i = 0; i < count - 1; i++) { + max_freq = freq_table[i].frequency; + ind = i; + for (j = i + 1; j < count; j++) { + freq = freq_table[j].frequency; + if (freq == CPUFREQ_ENTRY_INVALID || + freq <= max_freq) + continue; + ind = j; + max_freq = freq; + } + + if (ind != i) { + /* exchange the frequencies */ + table.driver_data = freq_table[i].driver_data; + table.frequency = freq_table[i].frequency; + freq_table[i].driver_data = freq_table[ind].driver_data; + freq_table[i].frequency = freq_table[ind].frequency; + freq_table[ind].driver_data = table.driver_data; + freq_table[ind].frequency = table.frequency; + } + } +} + +static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + struct device_node *np; + int i, count, ret; + u32 freq, mask; + struct clk *clk; + struct cpufreq_frequency_table *table; + struct cpu_data *data; + unsigned int cpu = policy->cpu; + u64 u64temp; + + np = of_get_cpu_node(cpu, NULL); + if (!np) + return -ENODEV; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) { + pr_err("%s: no memory\n", __func__); + goto err_np; + } + + policy->clk = of_clk_get(np, 0); + if (IS_ERR(policy->clk)) { + pr_err("%s: no clock information\n", __func__); + goto err_nomem2; + } + + data->parent = of_parse_phandle(np, "clocks", 0); + if (!data->parent) { + pr_err("%s:
could not get clock information\n", __func__); + goto err_nomem2; + } + + count = of_property_count_strings(data->parent, "clock-names"); + table = kcalloc(count + 1, sizeof(*table), GFP_KERNEL); + if (!table) { + pr_err("%s: no memory\n", __func__); + goto err_node; + } + + if (fmask) + mask = fmask[get_hard_smp_processor_id(cpu)]; + else + mask = 0x0; + + for (i = 0; i < count; i++) { + clk = of_clk_get(data->parent, i); + freq = clk_get_rate(clk); + /* + * the clock is valid if its frequency is not masked + * and larger than the minimum allowed frequency. + */ + if (freq < min_cpufreq || (mask & (1 << i))) + table[i].frequency = CPUFREQ_ENTRY_INVALID; + else + table[i].frequency = freq / 1000; + table[i].driver_data = i; + } + freq_table_redup(table, count); + freq_table_sort(table, count); + table[i].frequency = CPUFREQ_TABLE_END; + + /* set the min and max frequency properly */ + ret = cpufreq_table_validate_and_show(policy, table); + if (ret) { + pr_err("invalid frequency table: %d\n", ret); + goto err_nomem1; + } + + data->table = table; + per_cpu(cpu_data, cpu) = data; + + /* update ->cpus if we have a cluster, no harm if not */ + cpumask_copy(policy->cpus, per_cpu(cpu_mask, cpu)); + for_each_cpu(i, per_cpu(cpu_mask, cpu)) + per_cpu(cpu_data, i) = data; + + /* Minimum transition latency is 12 platform clocks */ + u64temp = 12ULL * NSEC_PER_SEC; + do_div(u64temp, fsl_get_sys_freq()); + policy->cpuinfo.transition_latency = u64temp + 1; + + of_node_put(np); + + return 0; + +err_nomem1: + kfree(table); +err_node: + of_node_put(data->parent); +err_nomem2: + per_cpu(cpu_data, cpu) = NULL; + kfree(data); +err_np: + of_node_put(np); + + return -ENODEV; +} + +static int __exit corenet_cpufreq_cpu_exit(struct cpufreq_policy *policy) +{ + struct cpu_data *data = per_cpu(cpu_data, policy->cpu); + unsigned int cpu; + + of_node_put(data->parent); + kfree(data->table); + kfree(data); + + for_each_cpu(cpu, per_cpu(cpu_mask, policy->cpu)) + per_cpu(cpu_data, cpu) = NULL; + + return 0; +} + +static int corenet_cpufreq_target(struct cpufreq_policy *policy, + unsigned int index) +{ + struct clk *parent; + struct cpu_data *data = per_cpu(cpu_data, policy->cpu); + + parent = of_clk_get(data->parent, data->table[index].driver_data); + return clk_set_parent(policy->clk, parent); +} + +static struct cpufreq_driver ppc_corenet_cpufreq_driver = { + .name = "ppc_cpufreq", + .flags = CPUFREQ_CONST_LOOPS, + .init = corenet_cpufreq_cpu_init, + .exit = __exit_p(corenet_cpufreq_cpu_exit), + .verify = cpufreq_generic_frequency_table_verify, + .target_index = corenet_cpufreq_target, + .get = cpufreq_generic_get, + .attr = cpufreq_generic_attr, +}; + +static const struct of_device_id node_matches[] __initdata = { + { .compatible = "fsl,p2041-clockgen", .data = &sdata[0], }, + { .compatible = "fsl,p3041-clockgen", .data = &sdata[0], }, + { .compatible = "fsl,p5020-clockgen", .data = &sdata[1], }, + { .compatible = "fsl,p4080-clockgen", .data = &sdata[2], }, + { .compatible = "fsl,p5040-clockgen", .data = &sdata[2], }, + { .compatible = "fsl,qoriq-clockgen-2.0", }, + {} +}; + +static int __init ppc_corenet_cpufreq_init(void) +{ + int ret; + struct device_node *np; + const struct of_device_id *match; + const struct soc_data *data; + unsigned int cpu; + + np = of_find_matching_node(NULL, node_matches); + if (!np) + return -ENODEV; + + for_each_possible_cpu(cpu) { + if (!alloc_cpumask_var(&per_cpu(cpu_mask, cpu), GFP_KERNEL)) + goto err_mask; + cpumask_copy(per_cpu(cpu_mask, cpu), cpu_core_mask(cpu)); + } + + match =
of_match_node(node_matches, np); + data = match->data; + if (data) { + if (data->flag) + fmask = data->freq_mask; + min_cpufreq = fsl_get_sys_freq(); + } else { + min_cpufreq = fsl_get_sys_freq() / 2; + } + + of_node_put(np); + + ret = cpufreq_register_driver(&ppc_corenet_cpufreq_driver); + if (!ret) + pr_info("Freescale PowerPC corenet CPU frequency scaling driver\n"); + + return ret; + +err_mask: + for_each_possible_cpu(cpu) + free_cpumask_var(per_cpu(cpu_mask, cpu)); + + return -ENOMEM; +} +module_init(ppc_corenet_cpufreq_init); + +static void __exit ppc_corenet_cpufreq_exit(void) +{ + unsigned int cpu; + + for_each_possible_cpu(cpu) + free_cpumask_var(per_cpu(cpu_mask, cpu)); + + cpufreq_unregister_driver(&ppc_corenet_cpufreq_driver); +} +module_exit(ppc_corenet_cpufreq_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Tang Yuantian <Yuantian.Tang@freescale.com>"); +MODULE_DESCRIPTION("cpufreq driver for Freescale e500mc series SoCs"); diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c index e577a1dbbfc..5a4c5a639f6 100644 --- a/drivers/cpufreq/ppc_cbe_cpufreq.c +++ b/drivers/cpufreq/ppc_cbe_cpufreq.c @@ -30,20 +30,17 @@ #include "ppc_cbe_cpufreq.h" -static DEFINE_MUTEX(cbe_switch_mutex); - - /* the CBE supports an 8 step frequency scaling */ static struct cpufreq_frequency_table cbe_freqs[] = { - {1, 0}, - {2, 0}, - {3, 0}, - {4, 0}, - {5, 0}, - {6, 0}, - {8, 0}, - {10, 0}, - {0, CPUFREQ_TABLE_END}, + {0, 1, 0}, + {0, 2, 0}, + {0, 3, 0}, + {0, 4, 0}, + {0, 5, 0}, + {0, 6, 0}, + {0, 8, 0}, + {0, 10, 0}, + {0, 0, CPUFREQ_TABLE_END}, }; /* @@ -70,9 +67,10 @@ static int set_pmode(unsigned int cpu, unsigned int slow_mode) static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy) { + struct cpufreq_frequency_table *pos; const u32 *max_freqp; u32 max_freq; - int i, cur_pmode; + int cur_pmode; struct device_node *cpu; cpu = of_get_cpu_node(policy->cpu, NULL); @@ -105,9 +103,9 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy) pr_debug("initializing frequency table\n"); /* initialize frequency table */ - for (i=0; cbe_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) { - cbe_freqs[i].frequency = max_freq / cbe_freqs[i].index; - pr_debug("%d: %d\n", i, cbe_freqs[i].frequency); + cpufreq_for_each_entry(pos, cbe_freqs) { + pos->frequency = max_freq / pos->driver_data; + pr_debug("%d: %d\n", (int)(pos - cbe_freqs), pos->frequency); } /* if DEBUG is enabled set_pmode() measures the latency @@ -123,65 +121,28 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy) cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); #endif - cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu); - /* this ensures that policy->cpuinfo_min * and policy->cpuinfo_max are set correctly */ - return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs); -} - -static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; -} - -static int cbe_cpufreq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, cbe_freqs); + return cpufreq_table_validate_and_show(policy, cbe_freqs); } static int cbe_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) + unsigned int cbe_pmode_new) { - int rc; - struct cpufreq_freqs freqs; - unsigned int cbe_pmode_new; - - cpufreq_frequency_table_target(policy, - cbe_freqs, - target_freq, - relation, - &cbe_pmode_new); - - freqs.old = policy->cur; - freqs.new = cbe_freqs[cbe_pmode_new].frequency; 
- - mutex_lock(&cbe_switch_mutex); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - pr_debug("setting frequency for cpu %d to %d kHz, " \ "1/%d of max frequency\n", policy->cpu, cbe_freqs[cbe_pmode_new].frequency, - cbe_freqs[cbe_pmode_new].index); - - rc = set_pmode(policy->cpu, cbe_pmode_new); + cbe_freqs[cbe_pmode_new].driver_data); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - mutex_unlock(&cbe_switch_mutex); - - return rc; + return set_pmode(policy->cpu, cbe_pmode_new); } static struct cpufreq_driver cbe_cpufreq_driver = { - .verify = cbe_cpufreq_verify, - .target = cbe_cpufreq_target, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = cbe_cpufreq_target, .init = cbe_cpufreq_cpu_init, - .exit = cbe_cpufreq_cpu_exit, .name = "cbe-cpufreq", - .owner = THIS_MODULE, .flags = CPUFREQ_CONST_LOOPS, }; diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c index 9e5bc8e388a..e24269ab4e9 100644 --- a/drivers/cpufreq/pxa2xx-cpufreq.c +++ b/drivers/cpufreq/pxa2xx-cpufreq.c @@ -191,7 +191,7 @@ static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq) return ret; } -static __init void pxa_cpufreq_init_voltages(void) +static void __init pxa_cpufreq_init_voltages(void) { vcc_core = regulator_get(NULL, "vcc_core"); if (IS_ERR(vcc_core)) { @@ -207,7 +207,7 @@ static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq) return 0; } -static __init void pxa_cpufreq_init_voltages(void) { } +static void __init pxa_cpufreq_init_voltages(void) { } #endif static void find_freq_tables(struct cpufreq_frequency_table **freq_table, @@ -262,36 +262,15 @@ static u32 mdrefr_dri(unsigned int freq) return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32; } -/* find a valid frequency point */ -static int pxa_verify_policy(struct cpufreq_policy *policy) -{ - struct cpufreq_frequency_table *pxa_freqs_table; - pxa_freqs_t *pxa_freqs; - int ret; - - find_freq_tables(&pxa_freqs_table, &pxa_freqs); - ret = cpufreq_frequency_table_verify(policy, pxa_freqs_table); - - if (freq_debug) - pr_debug("Verified CPU policy: %dKhz min to %dKhz max\n", - policy->min, policy->max); - - return ret; -} - static unsigned int pxa_cpufreq_get(unsigned int cpu) { return get_clk_frequency_khz(0); } -static int pxa_set_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) +static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx) { struct cpufreq_frequency_table *pxa_freqs_table; pxa_freqs_t *pxa_freq_settings; - struct cpufreq_freqs freqs; - unsigned int idx; unsigned long flags; unsigned int new_freq_cpu, new_freq_mem; unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg; @@ -300,32 +279,19 @@ static int pxa_set_target(struct cpufreq_policy *policy, /* Get the current policy */ find_freq_tables(&pxa_freqs_table, &pxa_freq_settings); - /* Lookup the next frequency */ - if (cpufreq_frequency_table_target(policy, pxa_freqs_table, - target_freq, relation, &idx)) { - return -EINVAL; - } - new_freq_cpu = pxa_freq_settings[idx].khz; new_freq_mem = pxa_freq_settings[idx].membus; - freqs.old = policy->cur; - freqs.new = new_freq_cpu; if (freq_debug) pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n", - freqs.new / 1000, (pxa_freq_settings[idx].div2) ? + new_freq_cpu / 1000, (pxa_freq_settings[idx].div2) ? 
(new_freq_mem / 2000) : (new_freq_mem / 1000)); - if (vcc_core && freqs.new > freqs.old) + if (vcc_core && new_freq_cpu > policy->cur) { ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]); - if (ret) - return ret; - /* - * Tell everyone what we're about to do... - * you should add a notify client with any platform specific - * Vcc changing capability - */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + if (ret) + return ret; + } /* Calculate the next MDREFR. If we're slowing down the SDRAM clock * we need to preset the smaller DRI before the change. If we're @@ -376,13 +342,6 @@ static int pxa_set_target(struct cpufreq_policy *policy, local_irq_restore(flags); /* - * Tell everyone what we've just done... - * you should add a notify client with any platform specific - * SDRAM refresh timer adjustments - */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - /* * Even if voltage setting fails, we don't report it, as the frequency * change succeeded. The voltage reduction is not a critical failure, * only power savings will suffer from this. @@ -391,7 +350,7 @@ static int pxa_set_target(struct cpufreq_policy *policy, * bug is triggered (seems a deadlock). Should anybody find out where, * the "return 0" should become a "return ret". */ - if (vcc_core && freqs.new < freqs.old) + if (vcc_core && new_freq_cpu < policy->cur) ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]); return 0; @@ -414,13 +373,11 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy) /* set default policy and cpuinfo */ policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */ - policy->cur = get_clk_frequency_khz(0); /* current freq */ - policy->min = policy->max = policy->cur; /* Generate pxa25x the run cpufreq_frequency_table struct */ for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) { pxa255_run_freq_table[i].frequency = pxa255_run_freqs[i].khz; - pxa255_run_freq_table[i].index = i; + pxa255_run_freq_table[i].driver_data = i; } pxa255_run_freq_table[i].frequency = CPUFREQ_TABLE_END; @@ -428,7 +385,7 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy) for (i = 0; i < NUM_PXA25x_TURBO_FREQS; i++) { pxa255_turbo_freq_table[i].frequency = pxa255_turbo_freqs[i].khz; - pxa255_turbo_freq_table[i].index = i; + pxa255_turbo_freq_table[i].driver_data = i; } pxa255_turbo_freq_table[i].frequency = CPUFREQ_TABLE_END; @@ -440,9 +397,9 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy) if (freq > pxa27x_maxfreq) break; pxa27x_freq_table[i].frequency = freq; - pxa27x_freq_table[i].index = i; + pxa27x_freq_table[i].driver_data = i; } - pxa27x_freq_table[i].index = i; + pxa27x_freq_table[i].driver_data = i; pxa27x_freq_table[i].frequency = CPUFREQ_TABLE_END; /* @@ -453,10 +410,12 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy) find_freq_tables(&pxa255_freq_table, &pxa255_freqs); pr_info("PXA255 cpufreq using %s frequency table\n", pxa255_turbo_table ? 
"turbo" : "run"); - cpufreq_frequency_table_cpuinfo(policy, pxa255_freq_table); + + cpufreq_table_validate_and_show(policy, pxa255_freq_table); + } + else if (cpu_is_pxa27x()) { + cpufreq_table_validate_and_show(policy, pxa27x_freq_table); } - else if (cpu_is_pxa27x()) - cpufreq_frequency_table_cpuinfo(policy, pxa27x_freq_table); printk(KERN_INFO "PXA CPU frequency change support initialized\n"); @@ -464,8 +423,9 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy) } static struct cpufreq_driver pxa_cpufreq_driver = { - .verify = pxa_verify_policy, - .target = pxa_set_target, + .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = pxa_set_target, .init = pxa_cpufreq_init, .get = pxa_cpufreq_get, .name = "PXA2xx", diff --git a/drivers/cpufreq/pxa3xx-cpufreq.c b/drivers/cpufreq/pxa3xx-cpufreq.c index 15d60f857ad..a0127590038 100644 --- a/drivers/cpufreq/pxa3xx-cpufreq.c +++ b/drivers/cpufreq/pxa3xx-cpufreq.c @@ -98,17 +98,17 @@ static int setup_freqs_table(struct cpufreq_policy *policy, return -ENOMEM; for (i = 0; i < num; i++) { - table[i].index = i; + table[i].driver_data = i; table[i].frequency = freqs[i].cpufreq_mhz * 1000; } - table[num].index = i; + table[num].driver_data = i; table[num].frequency = CPUFREQ_TABLE_END; pxa3xx_freqs = freqs; pxa3xx_freqs_num = num; pxa3xx_freqs_table = table; - return cpufreq_frequency_table_cpuinfo(policy, table); + return cpufreq_table_validate_and_show(policy, table); } static void __update_core_freq(struct pxa3xx_freq_info *info) @@ -150,54 +150,26 @@ static void __update_bus_freq(struct pxa3xx_freq_info *info) cpu_relax(); } -static int pxa3xx_cpufreq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, pxa3xx_freqs_table); -} - static unsigned int pxa3xx_cpufreq_get(unsigned int cpu) { return pxa3xx_get_clk_frequency_khz(0); } -static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) +static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, unsigned int index) { struct pxa3xx_freq_info *next; - struct cpufreq_freqs freqs; unsigned long flags; - int idx; if (policy->cpu != 0) return -EINVAL; - /* Lookup the next frequency */ - if (cpufreq_frequency_table_target(policy, pxa3xx_freqs_table, - target_freq, relation, &idx)) - return -EINVAL; - - next = &pxa3xx_freqs[idx]; - - freqs.old = policy->cur; - freqs.new = next->cpufreq_mhz * 1000; - - pr_debug("CPU frequency from %d MHz to %d MHz%s\n", - freqs.old / 1000, freqs.new / 1000, - (freqs.old == freqs.new) ? " (skipped)" : ""); - - if (freqs.old == target_freq) - return 0; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + next = &pxa3xx_freqs[index]; local_irq_save(flags); __update_core_freq(next); __update_bus_freq(next); local_irq_restore(flags); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - return 0; } @@ -206,17 +178,18 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy) int ret = -EINVAL; /* set default policy and cpuinfo */ - policy->cpuinfo.min_freq = 104000; - policy->cpuinfo.max_freq = (cpu_is_pxa320()) ? 806000 : 624000; + policy->min = policy->cpuinfo.min_freq = 104000; + policy->max = policy->cpuinfo.max_freq = + (cpu_is_pxa320()) ? 
806000 : 624000; policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */ - policy->max = pxa3xx_get_clk_frequency_khz(0); - policy->cur = policy->min = policy->max; if (cpu_is_pxa300() || cpu_is_pxa310()) - ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa300_freqs)); + ret = setup_freqs_table(policy, pxa300_freqs, + ARRAY_SIZE(pxa300_freqs)); if (cpu_is_pxa320()) - ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa320_freqs)); + ret = setup_freqs_table(policy, pxa320_freqs, + ARRAY_SIZE(pxa320_freqs)); if (ret) { pr_err("failed to setup frequency table\n"); @@ -228,8 +201,9 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy) } static struct cpufreq_driver pxa3xx_cpufreq_driver = { - .verify = pxa3xx_cpufreq_verify, - .target = pxa3xx_cpufreq_set, + .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = pxa3xx_cpufreq_set, .init = pxa3xx_cpufreq_init, .get = pxa3xx_cpufreq_get, .name = "pxa3xx-cpufreq", diff --git a/drivers/cpufreq/s3c2410-cpufreq.c b/drivers/cpufreq/s3c2410-cpufreq.c new file mode 100644 index 00000000000..cfa0dd8723e --- /dev/null +++ b/drivers/cpufreq/s3c2410-cpufreq.c @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2006-2008 Simtec Electronics + * http://armlinux.simtec.co.uk/ + * Ben Dooks <ben@simtec.co.uk> + * + * S3C2410 CPU Frequency scaling + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/cpufreq.h> +#include <linux/device.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> + +#include <asm/mach/arch.h> +#include <asm/mach/map.h> + +#include <mach/regs-clock.h> + +#include <plat/cpu.h> +#include <plat/clock.h> +#include <plat/cpu-freq-core.h> + +/* Note, 2410A has an extra mode for 1:4:4 ratio, bit 2 of CLKDIV */ + +static void s3c2410_cpufreq_setdivs(struct s3c_cpufreq_config *cfg) +{ + u32 clkdiv = 0; + + if (cfg->divs.h_divisor == 2) + clkdiv |= S3C2410_CLKDIVN_HDIVN; + + if (cfg->divs.p_divisor != cfg->divs.h_divisor) + clkdiv |= S3C2410_CLKDIVN_PDIVN; + + __raw_writel(clkdiv, S3C2410_CLKDIVN); +} + +static int s3c2410_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg) +{ + unsigned long hclk, fclk, pclk; + unsigned int hdiv, pdiv; + unsigned long hclk_max; + + fclk = cfg->freq.fclk; + hclk_max = cfg->max.hclk; + + cfg->freq.armclk = fclk; + + s3c_freq_dbg("%s: fclk is %lu, max hclk %lu\n", + __func__, fclk, hclk_max); + + hdiv = (fclk > cfg->max.hclk) ? 2 : 1; + hclk = fclk / hdiv; + + if (hclk > cfg->max.hclk) { + s3c_freq_dbg("%s: hclk too big\n", __func__); + return -EINVAL; + } + + pdiv = (hclk > cfg->max.pclk) ? 
2 : 1; + pclk = hclk / pdiv; + + if (pclk > cfg->max.pclk) { + s3c_freq_dbg("%s: pclk too big\n", __func__); + return -EINVAL; + } + + pdiv *= hdiv; + + /* record the result */ + cfg->divs.p_divisor = pdiv; + cfg->divs.h_divisor = hdiv; + + return 0; +} + +static struct s3c_cpufreq_info s3c2410_cpufreq_info = { + .max = { + .fclk = 200000000, + .hclk = 100000000, + .pclk = 50000000, + }, + + /* transition latency is about 5ms worst-case, so + * set 10ms to be sure */ + .latency = 10000000, + + .locktime_m = 150, + .locktime_u = 150, + .locktime_bits = 12, + + .need_pll = 1, + + .name = "s3c2410", + .calc_iotiming = s3c2410_iotiming_calc, + .set_iotiming = s3c2410_iotiming_set, + .get_iotiming = s3c2410_iotiming_get, + .resume_clocks = s3c2410_setup_clocks, + + .set_fvco = s3c2410_set_fvco, + .set_refresh = s3c2410_cpufreq_setrefresh, + .set_divs = s3c2410_cpufreq_setdivs, + .calc_divs = s3c2410_cpufreq_calcdivs, + + .debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs), +}; + +static int s3c2410_cpufreq_add(struct device *dev, + struct subsys_interface *sif) +{ + return s3c_cpufreq_register(&s3c2410_cpufreq_info); +} + +static struct subsys_interface s3c2410_cpufreq_interface = { + .name = "s3c2410_cpufreq", + .subsys = &s3c2410_subsys, + .add_dev = s3c2410_cpufreq_add, +}; + +static int __init s3c2410_cpufreq_init(void) +{ + return subsys_interface_register(&s3c2410_cpufreq_interface); +} +arch_initcall(s3c2410_cpufreq_init); + +static int s3c2410a_cpufreq_add(struct device *dev, + struct subsys_interface *sif) +{ + /* alter the maximum freq settings for S3C2410A. If a board knows + * it only has a maximum of 200, then it should register its own + * limits. */ + + s3c2410_cpufreq_info.max.fclk = 266000000; + s3c2410_cpufreq_info.max.hclk = 133000000; + s3c2410_cpufreq_info.max.pclk = 66500000; + s3c2410_cpufreq_info.name = "s3c2410a"; + + return s3c2410_cpufreq_add(dev, sif); +} + +static struct subsys_interface s3c2410a_cpufreq_interface = { + .name = "s3c2410a_cpufreq", + .subsys = &s3c2410a_subsys, + .add_dev = s3c2410a_cpufreq_add, +}; + +static int __init s3c2410a_cpufreq_init(void) +{ + return subsys_interface_register(&s3c2410a_cpufreq_interface); +} +arch_initcall(s3c2410a_cpufreq_init); diff --git a/drivers/cpufreq/s3c2412-cpufreq.c b/drivers/cpufreq/s3c2412-cpufreq.c new file mode 100644 index 00000000000..4645b489899 --- /dev/null +++ b/drivers/cpufreq/s3c2412-cpufreq.c @@ -0,0 +1,257 @@ +/* + * Copyright 2008 Simtec Electronics + * http://armlinux.simtec.co.uk/ + * Ben Dooks <ben@simtec.co.uk> + * + * S3C2412 CPU Frequency scaling + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/cpufreq.h> +#include <linux/device.h> +#include <linux/delay.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> + +#include <asm/mach/arch.h> +#include <asm/mach/map.h> + +#include <mach/regs-clock.h> +#include <mach/s3c2412.h> + +#include <plat/cpu.h> +#include <plat/clock.h> +#include <plat/cpu-freq-core.h> + +/* our clock resources.
*/ +static struct clk *xtal; +static struct clk *fclk; +static struct clk *hclk; +static struct clk *armclk; + +/* HDIV: 1, 2, 3, 4, 6, 8 */ + +static int s3c2412_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg) +{ + unsigned int hdiv, pdiv, armdiv, dvs; + unsigned long hclk, fclk, armclk, armdiv_clk; + unsigned long hclk_max; + + fclk = cfg->freq.fclk; + armclk = cfg->freq.armclk; + hclk_max = cfg->max.hclk; + + /* We can't run hclk above armclk as at the best we have to + * have armclk and hclk in dvs mode. */ + + if (hclk_max > armclk) + hclk_max = armclk; + + s3c_freq_dbg("%s: fclk=%lu, armclk=%lu, hclk_max=%lu\n", + __func__, fclk, armclk, hclk_max); + s3c_freq_dbg("%s: want f=%lu, arm=%lu, h=%lu, p=%lu\n", + __func__, cfg->freq.fclk, cfg->freq.armclk, + cfg->freq.hclk, cfg->freq.pclk); + + armdiv = fclk / armclk; + + if (armdiv < 1) + armdiv = 1; + if (armdiv > 2) + armdiv = 2; + + cfg->divs.arm_divisor = armdiv; + armdiv_clk = fclk / armdiv; + + hdiv = armdiv_clk / hclk_max; + if (hdiv < 1) + hdiv = 1; + + cfg->freq.hclk = hclk = armdiv_clk / hdiv; + + /* set dvs depending on whether we reached armclk or not. */ + cfg->divs.dvs = dvs = armclk < armdiv_clk; + + /* update the actual armclk we achieved. */ + cfg->freq.armclk = dvs ? hclk : armdiv_clk; + + s3c_freq_dbg("%s: armclk %lu, hclk %lu, armdiv %d, hdiv %d, dvs %d\n", + __func__, armclk, hclk, armdiv, hdiv, cfg->divs.dvs); + + if (hdiv > 4) + goto invalid; + + pdiv = (hclk > cfg->max.pclk) ? 2 : 1; + + if ((hclk / pdiv) > cfg->max.pclk) + pdiv++; + + cfg->freq.pclk = hclk / pdiv; + + s3c_freq_dbg("%s: pdiv %d\n", __func__, pdiv); + + if (pdiv > 2) + goto invalid; + + pdiv *= hdiv; + + /* store the result, and then return */ + + cfg->divs.h_divisor = hdiv * armdiv; + cfg->divs.p_divisor = pdiv * armdiv; + + return 0; + +invalid: + return -EINVAL; +} + +static void s3c2412_cpufreq_setdivs(struct s3c_cpufreq_config *cfg) +{ + unsigned long clkdiv; + unsigned long olddiv; + + olddiv = clkdiv = __raw_readl(S3C2410_CLKDIVN); + + /* clear off current clock info */ + + clkdiv &= ~S3C2412_CLKDIVN_ARMDIVN; + clkdiv &= ~S3C2412_CLKDIVN_HDIVN_MASK; + clkdiv &= ~S3C2412_CLKDIVN_PDIVN; + + if (cfg->divs.arm_divisor == 2) + clkdiv |= S3C2412_CLKDIVN_ARMDIVN; + + clkdiv |= ((cfg->divs.h_divisor / cfg->divs.arm_divisor) - 1); + + if (cfg->divs.p_divisor != cfg->divs.h_divisor) + clkdiv |= S3C2412_CLKDIVN_PDIVN; + + s3c_freq_dbg("%s: div %08lx => %08lx\n", __func__, olddiv, clkdiv); + __raw_writel(clkdiv, S3C2410_CLKDIVN); + + clk_set_parent(armclk, cfg->divs.dvs ? hclk : fclk); +} + +static void s3c2412_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg) +{ + struct s3c_cpufreq_board *board = cfg->board; + unsigned long refresh; + + s3c_freq_dbg("%s: refresh %u ns, hclk %lu\n", __func__, + board->refresh, cfg->freq.hclk); + + /* Reduce both the refresh time (in ns) and the frequency (in MHz) + * by 10 each to ensure that we do not overflow 32 bit numbers. This + * should work for HCLK up to 133MHz and refresh period up to 30usec. + */ + + refresh = (board->refresh / 10); + refresh *= (cfg->freq.hclk / 100); + refresh /= (1 * 1000 * 1000); /* 10^6 */ + + s3c_freq_dbg("%s: setting refresh 0x%08lx\n", __func__, refresh); + __raw_writel(refresh, S3C2412_REFRESH); +} + +/* set the default cpu frequency information, based on an 200MHz part + * as we have no other way of detecting the speed rating in software. 
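+ * + * As a worked example of s3c2412_cpufreq_calcdivs() above against these + * 200MHz defaults (max hclk 100MHz, max pclk 50MHz): a request for + * armclk == 200MHz leaves dvs off and yields hclk = 100MHz + * (h_divisor 2) and pclk = 50MHz (p_divisor 4).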
+ */ + +static struct s3c_cpufreq_info s3c2412_cpufreq_info = { + .max = { + .fclk = 200000000, + .hclk = 100000000, + .pclk = 50000000, + }, + + .latency = 5000000, /* 5ms */ + + .locktime_m = 150, + .locktime_u = 150, + .locktime_bits = 16, + + .name = "s3c2412", + .set_refresh = s3c2412_cpufreq_setrefresh, + .set_divs = s3c2412_cpufreq_setdivs, + .calc_divs = s3c2412_cpufreq_calcdivs, + + .calc_iotiming = s3c2412_iotiming_calc, + .set_iotiming = s3c2412_iotiming_set, + .get_iotiming = s3c2412_iotiming_get, + + .resume_clocks = s3c2412_setup_clocks, + + .debug_io_show = s3c_cpufreq_debugfs_call(s3c2412_iotiming_debugfs), +}; + +static int s3c2412_cpufreq_add(struct device *dev, + struct subsys_interface *sif) +{ + unsigned long fclk_rate; + + hclk = clk_get(NULL, "hclk"); + if (IS_ERR(hclk)) { + printk(KERN_ERR "%s: cannot find hclk clock\n", __func__); + return -ENOENT; + } + + fclk = clk_get(NULL, "fclk"); + if (IS_ERR(fclk)) { + printk(KERN_ERR "%s: cannot find fclk clock\n", __func__); + goto err_fclk; + } + + fclk_rate = clk_get_rate(fclk); + if (fclk_rate > 200000000) { + printk(KERN_INFO + "%s: fclk %ld MHz, assuming 266MHz capable part\n", + __func__, fclk_rate / 1000000); + s3c2412_cpufreq_info.max.fclk = 266000000; + s3c2412_cpufreq_info.max.hclk = 133000000; + s3c2412_cpufreq_info.max.pclk = 66000000; + } + + armclk = clk_get(NULL, "armclk"); + if (IS_ERR(armclk)) { + printk(KERN_ERR "%s: cannot find arm clock\n", __func__); + goto err_armclk; + } + + xtal = clk_get(NULL, "xtal"); + if (IS_ERR(xtal)) { + printk(KERN_ERR "%s: cannot find xtal clock\n", __func__); + goto err_xtal; + } + + return s3c_cpufreq_register(&s3c2412_cpufreq_info); + +err_xtal: + clk_put(armclk); +err_armclk: + clk_put(fclk); +err_fclk: + clk_put(hclk); + + return -ENOENT; +} + +static struct subsys_interface s3c2412_cpufreq_interface = { + .name = "s3c2412_cpufreq", + .subsys = &s3c2412_subsys, + .add_dev = s3c2412_cpufreq_add, +}; + +static int s3c2412_cpufreq_init(void) +{ + return subsys_interface_register(&s3c2412_cpufreq_interface); +} +arch_initcall(s3c2412_cpufreq_init); diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c index 4f1881eee3f..2fd53eaaec2 100644 --- a/drivers/cpufreq/s3c2416-cpufreq.c +++ b/drivers/cpufreq/s3c2416-cpufreq.c @@ -72,31 +72,21 @@ static struct s3c2416_dvfs s3c2416_dvfs_table[] = { #endif static struct cpufreq_frequency_table s3c2416_freq_table[] = { - { SOURCE_HCLK, FREQ_DVS }, - { SOURCE_ARMDIV, 133333 }, - { SOURCE_ARMDIV, 266666 }, - { SOURCE_ARMDIV, 400000 }, - { 0, CPUFREQ_TABLE_END }, + { 0, SOURCE_HCLK, FREQ_DVS }, + { 0, SOURCE_ARMDIV, 133333 }, + { 0, SOURCE_ARMDIV, 266666 }, + { 0, SOURCE_ARMDIV, 400000 }, + { 0, 0, CPUFREQ_TABLE_END }, }; static struct cpufreq_frequency_table s3c2450_freq_table[] = { - { SOURCE_HCLK, FREQ_DVS }, - { SOURCE_ARMDIV, 133500 }, - { SOURCE_ARMDIV, 267000 }, - { SOURCE_ARMDIV, 534000 }, - { 0, CPUFREQ_TABLE_END }, + { 0, SOURCE_HCLK, FREQ_DVS }, + { 0, SOURCE_ARMDIV, 133500 }, + { 0, SOURCE_ARMDIV, 267000 }, + { 0, SOURCE_ARMDIV, 534000 }, + { 0, 0, CPUFREQ_TABLE_END }, }; -static int s3c2416_cpufreq_verify_speed(struct cpufreq_policy *policy) -{ - struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; - - if (policy->cpu != 0) - return -EINVAL; - - return cpufreq_frequency_table_verify(policy, s3c_freq->freq_table); -} - static unsigned int s3c2416_cpufreq_get_speed(unsigned int cpu) { struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; @@ -205,7 +195,7 @@ static int s3c2416_cpufreq_leave_dvs(struct 
s3c2416_data *s3c_freq, int idx) ret = s3c2416_cpufreq_set_armdiv(s3c_freq, clk_get_rate(s3c_freq->hclk) / 1000); if (ret < 0) { - pr_err("cpufreq: Failed to to set the armdiv to %lukHz: %d\n", + pr_err("cpufreq: Failed to set the armdiv to %lukHz: %d\n", clk_get_rate(s3c_freq->hclk) / 1000, ret); return ret; } @@ -227,24 +217,15 @@ static int s3c2416_cpufreq_leave_dvs(struct s3c2416_data *s3c_freq, int idx) } static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) + unsigned int index) { struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; - struct cpufreq_freqs freqs; + unsigned int new_freq; int idx, ret, to_dvs = 0; - unsigned int i; mutex_lock(&cpufreq_lock); - pr_debug("cpufreq: to %dKHz, relation %d\n", target_freq, relation); - - ret = cpufreq_frequency_table_target(policy, s3c_freq->freq_table, - target_freq, relation, &i); - if (ret != 0) - goto out; - - idx = s3c_freq->freq_table[i].index; + idx = s3c_freq->freq_table[index].driver_data; if (idx == SOURCE_HCLK) to_dvs = 1; @@ -256,24 +237,13 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy, goto out; } - freqs.flags = 0; - freqs.old = s3c_freq->is_dvs ? FREQ_DVS - : clk_get_rate(s3c_freq->armclk) / 1000; - /* When leavin dvs mode, always switch the armdiv to the hclk rate * The S3C2416 has stability issues when switching directly to * higher frequencies. */ - freqs.new = (s3c_freq->is_dvs && !to_dvs) + new_freq = (s3c_freq->is_dvs && !to_dvs) ? clk_get_rate(s3c_freq->hclk) / 1000 - : s3c_freq->freq_table[i].frequency; - - pr_debug("cpufreq: Transition %d-%dkHz\n", freqs.old, freqs.new); - - if (!to_dvs && freqs.old == freqs.new) - goto out; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + : s3c_freq->freq_table[index].frequency; if (to_dvs) { pr_debug("cpufreq: enter dvs\n"); @@ -282,12 +252,10 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy, pr_debug("cpufreq: leave dvs\n"); ret = s3c2416_cpufreq_leave_dvs(s3c_freq, idx); } else { - pr_debug("cpufreq: change armdiv to %dkHz\n", freqs.new); - ret = s3c2416_cpufreq_set_armdiv(s3c_freq, freqs.new); + pr_debug("cpufreq: change armdiv to %dkHz\n", new_freq); + ret = s3c2416_cpufreq_set_armdiv(s3c_freq, new_freq); } - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - out: mutex_unlock(&cpufreq_lock); @@ -298,7 +266,7 @@ out: static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq) { int count, v, i, found; - struct cpufreq_frequency_table *freq; + struct cpufreq_frequency_table *pos; struct s3c2416_dvfs *dvfs; count = regulator_count_voltages(s3c_freq->vddarm); @@ -307,12 +275,11 @@ static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq) return; } - freq = s3c_freq->freq_table; - while (count > 0 && freq->frequency != CPUFREQ_TABLE_END) { - if (freq->frequency == CPUFREQ_ENTRY_INVALID) - continue; + if (!count) + goto out; - dvfs = &s3c2416_dvfs_table[freq->index]; + cpufreq_for_each_valid_entry(pos, s3c_freq->freq_table) { + dvfs = &s3c2416_dvfs_table[pos->driver_data]; found = 0; /* Check only the min-voltage, more is always ok on S3C2416 */ @@ -324,13 +291,12 @@ static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq) if (!found) { pr_debug("cpufreq: %dkHz unsupported by regulator\n", - freq->frequency); - freq->frequency = CPUFREQ_ENTRY_INVALID; + pos->frequency); + pos->frequency = CPUFREQ_ENTRY_INVALID; } - - freq++; } +out: /* Guessed */ s3c_freq->regulator_latency = 1 
* 1000 * 1000; } @@ -370,7 +336,7 @@ static struct notifier_block s3c2416_cpufreq_reboot_notifier = { static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy) { struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; - struct cpufreq_frequency_table *freq; + struct cpufreq_frequency_table *pos; struct clk *msysclk; unsigned long rate; int ret; @@ -459,47 +425,37 @@ static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy) s3c_freq->regulator_latency = 0; #endif - freq = s3c_freq->freq_table; - while (freq->frequency != CPUFREQ_TABLE_END) { + cpufreq_for_each_entry(pos, s3c_freq->freq_table) { /* special handling for dvs mode */ - if (freq->index == 0) { + if (pos->driver_data == 0) { if (!s3c_freq->hclk) { pr_debug("cpufreq: %dkHz unsupported as it would need unavailable dvs mode\n", - freq->frequency); - freq->frequency = CPUFREQ_ENTRY_INVALID; + pos->frequency); + pos->frequency = CPUFREQ_ENTRY_INVALID; } else { - freq++; continue; } } /* Check for frequencies we can generate */ rate = clk_round_rate(s3c_freq->armdiv, - freq->frequency * 1000); + pos->frequency * 1000); rate /= 1000; - if (rate != freq->frequency) { + if (rate != pos->frequency) { pr_debug("cpufreq: %dkHz unsupported by clock (clk_round_rate return %lu)\n", - freq->frequency, rate); - freq->frequency = CPUFREQ_ENTRY_INVALID; + pos->frequency, rate); + pos->frequency = CPUFREQ_ENTRY_INVALID; } - - freq++; } - policy->cur = clk_get_rate(s3c_freq->armclk) / 1000; - /* Datasheet says PLL stabalisation time must be at least 300us, * so but add some fudge. (reference in LOCKCON0 register description) */ - policy->cpuinfo.transition_latency = (500 * 1000) + - s3c_freq->regulator_latency; - - ret = cpufreq_frequency_table_cpuinfo(policy, s3c_freq->freq_table); + ret = cpufreq_generic_init(policy, s3c_freq->freq_table, + (500 * 1000) + s3c_freq->regulator_latency); if (ret) goto err_freq_table; - cpufreq_frequency_table_get_attr(s3c_freq->freq_table, 0); - register_reboot_notifier(&s3c2416_cpufreq_reboot_notifier); return 0; @@ -518,20 +474,14 @@ err_hclk: return ret; } -static struct freq_attr *s3c2416_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - static struct cpufreq_driver s3c2416_cpufreq_driver = { - .owner = THIS_MODULE, - .flags = 0, - .verify = s3c2416_cpufreq_verify_speed, - .target = s3c2416_cpufreq_set_target, + .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = s3c2416_cpufreq_set_target, .get = s3c2416_cpufreq_get_speed, .init = s3c2416_cpufreq_driver_init, .name = "s3c2416", - .attr = s3c2416_cpufreq_attr, + .attr = cpufreq_generic_attr, }; static int __init s3c2416_cpufreq_init(void) diff --git a/drivers/cpufreq/s3c2440-cpufreq.c b/drivers/cpufreq/s3c2440-cpufreq.c new file mode 100644 index 00000000000..f84ed10755b --- /dev/null +++ b/drivers/cpufreq/s3c2440-cpufreq.c @@ -0,0 +1,310 @@ +/* + * Copyright (c) 2006-2009 Simtec Electronics + * http://armlinux.simtec.co.uk/ + * Ben Dooks <ben@simtec.co.uk> + * Vincent Sanders <vince@simtec.co.uk> + * + * S3C2440/S3C2442 CPU Frequency scaling + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+*/ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/cpufreq.h> +#include <linux/device.h> +#include <linux/delay.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> + +#include <asm/mach/arch.h> +#include <asm/mach/map.h> + +#include <mach/regs-clock.h> + +#include <plat/cpu.h> +#include <plat/cpu-freq-core.h> +#include <plat/clock.h> + +static struct clk *xtal; +static struct clk *fclk; +static struct clk *hclk; +static struct clk *armclk; + +/* HDIV: 1, 2, 3, 4, 6, 8 */ + +static inline int within_khz(unsigned long a, unsigned long b) +{ + long diff = a - b; + + return (diff >= -1000 && diff <= 1000); +} + +/** + * s3c2440_cpufreq_calcdivs - calculate divider settings + * @cfg: The cpu frequency settings. + * + * Calculate the divider values for the given frequency settings + * specified in @cfg. The values are stored in @cfg for later use + * by the relevant set routine if the requested settings can be reached. + */ +static int s3c2440_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg) +{ + unsigned int hdiv, pdiv; + unsigned long hclk, fclk, armclk; + unsigned long hclk_max; + + fclk = cfg->freq.fclk; + armclk = cfg->freq.armclk; + hclk_max = cfg->max.hclk; + + s3c_freq_dbg("%s: fclk is %lu, armclk %lu, max hclk %lu\n", + __func__, fclk, armclk, hclk_max); + + if (armclk > fclk) { + printk(KERN_WARNING "%s: armclk > fclk\n", __func__); + armclk = fclk; + } + + /* if we are in DVS, we need HCLK to be <= ARMCLK */ + if (armclk < fclk && armclk < hclk_max) + hclk_max = armclk; + + for (hdiv = 1; hdiv < 9; hdiv++) { + if (hdiv == 5 || hdiv == 7) + hdiv++; + + hclk = (fclk / hdiv); + if (hclk <= hclk_max || within_khz(hclk, hclk_max)) + break; + } + + s3c_freq_dbg("%s: hclk %lu, div %d\n", __func__, hclk, hdiv); + + if (hdiv > 8) + goto invalid; + + pdiv = (hclk > cfg->max.pclk) ? 2 : 1; + + if ((hclk / pdiv) > cfg->max.pclk) + pdiv++; + + s3c_freq_dbg("%s: pdiv %d\n", __func__, pdiv); + + if (pdiv > 2) + goto invalid; + + pdiv *= hdiv; + + /* calculate a valid armclk */ + + if (armclk < hclk) + armclk = hclk; + + /* if we're running armclk lower than fclk, this really means + * that the system should go into dvs mode, which means that + * armclk is connected to hclk. */ + if (armclk < fclk) { + cfg->divs.dvs = 1; + armclk = hclk; + } else + cfg->divs.dvs = 0; + + cfg->freq.armclk = armclk; + + /* store the result, and then return */ + + cfg->divs.h_divisor = hdiv; + cfg->divs.p_divisor = pdiv; + + return 0; + + invalid: + return -EINVAL; +} + +#define CAMDIVN_HCLK_HALF (S3C2440_CAMDIVN_HCLK3_HALF | \ + S3C2440_CAMDIVN_HCLK4_HALF) + +/** + * s3c2440_cpufreq_setdivs - set the cpu frequency divider settings + * @cfg: The cpu frequency settings. + * + * Set the divisors from the settings in @cfg, which were generated + * during the calculation phase by s3c2440_cpufreq_calcdivs().
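+ * + * As a rough illustration, taking the 400MHz maximum fclk from + * s3c2440_cpufreq_info below: h_divisor == 6 is encoded as the CLKDIVN + * divide-by-3 setting with the CAMDIVN HCLK3 half-rate bit also set + * (hclk = 400/6 = 66MHz), while h_divisor == 3 leaves the CAMDIVN bit + * clear and gives 400/3 = 133MHz.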
+ */ +static void s3c2440_cpufreq_setdivs(struct s3c_cpufreq_config *cfg) +{ + unsigned long clkdiv, camdiv; + + s3c_freq_dbg("%s: divisors: h=%d, p=%d\n", __func__, + cfg->divs.h_divisor, cfg->divs.p_divisor); + + clkdiv = __raw_readl(S3C2410_CLKDIVN); + camdiv = __raw_readl(S3C2440_CAMDIVN); + + clkdiv &= ~(S3C2440_CLKDIVN_HDIVN_MASK | S3C2440_CLKDIVN_PDIVN); + camdiv &= ~CAMDIVN_HCLK_HALF; + + switch (cfg->divs.h_divisor) { + case 1: + clkdiv |= S3C2440_CLKDIVN_HDIVN_1; + break; + + case 2: + clkdiv |= S3C2440_CLKDIVN_HDIVN_2; + break; + + case 6: + camdiv |= S3C2440_CAMDIVN_HCLK3_HALF; + case 3: + clkdiv |= S3C2440_CLKDIVN_HDIVN_3_6; + break; + + case 8: + camdiv |= S3C2440_CAMDIVN_HCLK4_HALF; + case 4: + clkdiv |= S3C2440_CLKDIVN_HDIVN_4_8; + break; + + default: + BUG(); /* we don't expect to get here. */ + } + + if (cfg->divs.p_divisor != cfg->divs.h_divisor) + clkdiv |= S3C2440_CLKDIVN_PDIVN; + + /* todo - set pclk. */ + + /* Write the divisors first with hclk intentionally halved so that + * when we write clkdiv we will under-frequency instead of over. We + * then make a short delay and remove the hclk halving if necessary. + */ + + __raw_writel(camdiv | CAMDIVN_HCLK_HALF, S3C2440_CAMDIVN); + __raw_writel(clkdiv, S3C2410_CLKDIVN); + + ndelay(20); + __raw_writel(camdiv, S3C2440_CAMDIVN); + + clk_set_parent(armclk, cfg->divs.dvs ? hclk : fclk); +} + +static int run_freq_for(unsigned long max_hclk, unsigned long fclk, + int *divs, + struct cpufreq_frequency_table *table, + size_t table_size) +{ + unsigned long freq; + int index = 0; + int div; + + for (div = *divs; div > 0; div = *divs++) { + freq = fclk / div; + + if (freq > max_hclk && div != 1) + continue; + + freq /= 1000; /* table is in kHz */ + index = s3c_cpufreq_addfreq(table, index, table_size, freq); + if (index < 0) + break; + } + + return index; +} + +static int hclk_divs[] = { 1, 2, 3, 4, 6, 8, -1 }; + +static int s3c2440_cpufreq_calctable(struct s3c_cpufreq_config *cfg, + struct cpufreq_frequency_table *table, + size_t table_size) +{ + int ret; + + WARN_ON(cfg->info == NULL); + WARN_ON(cfg->board == NULL); + + ret = run_freq_for(cfg->info->max.hclk, + cfg->info->max.fclk, + hclk_divs, + table, table_size); + + s3c_freq_dbg("%s: returning %d\n", __func__, ret); + + return ret; +} + +static struct s3c_cpufreq_info s3c2440_cpufreq_info = { + .max = { + .fclk = 400000000, + .hclk = 133333333, + .pclk = 66666666, + }, + + .locktime_m = 300, + .locktime_u = 300, + .locktime_bits = 16, + + .name = "s3c244x", + .calc_iotiming = s3c2410_iotiming_calc, + .set_iotiming = s3c2410_iotiming_set, + .get_iotiming = s3c2410_iotiming_get, + .set_fvco = s3c2410_set_fvco, + + .set_refresh = s3c2410_cpufreq_setrefresh, + .set_divs = s3c2440_cpufreq_setdivs, + .calc_divs = s3c2440_cpufreq_calcdivs, + .calc_freqtable = s3c2440_cpufreq_calctable, + + .resume_clocks = s3c244x_setup_clocks, + + .debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs), +}; + +static int s3c2440_cpufreq_add(struct device *dev, + struct subsys_interface *sif) +{ + xtal = s3c_cpufreq_clk_get(NULL, "xtal"); + hclk = s3c_cpufreq_clk_get(NULL, "hclk"); + fclk = s3c_cpufreq_clk_get(NULL, "fclk"); + armclk = s3c_cpufreq_clk_get(NULL, "armclk"); + + if (IS_ERR(xtal) || IS_ERR(hclk) || IS_ERR(fclk) || IS_ERR(armclk)) { + printk(KERN_ERR "%s: failed to get clocks\n", __func__); + return -ENOENT; + } + + return s3c_cpufreq_register(&s3c2440_cpufreq_info); +} + +static struct subsys_interface s3c2440_cpufreq_interface = { + .name = "s3c2440_cpufreq", + .subsys =
&s3c2440_subsys, + .add_dev = s3c2440_cpufreq_add, +}; + +static int s3c2440_cpufreq_init(void) +{ + return subsys_interface_register(&s3c2440_cpufreq_interface); +} + +/* arch_initcall adds the clocks we need, so use subsys_initcall. */ +subsys_initcall(s3c2440_cpufreq_init); + +static struct subsys_interface s3c2442_cpufreq_interface = { + .name = "s3c2442_cpufreq", + .subsys = &s3c2442_subsys, + .add_dev = s3c2440_cpufreq_add, +}; + +static int s3c2442_cpufreq_init(void) +{ + return subsys_interface_register(&s3c2442_cpufreq_interface); +} +subsys_initcall(s3c2442_cpufreq_init); diff --git a/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c b/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c new file mode 100644 index 00000000000..9b7b4289d66 --- /dev/null +++ b/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c @@ -0,0 +1,198 @@ +/* + * Copyright (c) 2009 Simtec Electronics + * http://armlinux.simtec.co.uk/ + * Ben Dooks <ben@simtec.co.uk> + * + * S3C24XX CPU Frequency scaling - debugfs status support + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#include <linux/init.h> +#include <linux/export.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/cpufreq.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/err.h> + +#include <plat/cpu-freq-core.h> + +static struct dentry *dbgfs_root; +static struct dentry *dbgfs_file_io; +static struct dentry *dbgfs_file_info; +static struct dentry *dbgfs_file_board; + +#define print_ns(x) ((x) / 10), ((x) % 10) + +static void show_max(struct seq_file *seq, struct s3c_freq *f) +{ + seq_printf(seq, "MAX: F=%lu, H=%lu, P=%lu, A=%lu\n", + f->fclk, f->hclk, f->pclk, f->armclk); +} + +static int board_show(struct seq_file *seq, void *p) +{ + struct s3c_cpufreq_config *cfg; + struct s3c_cpufreq_board *brd; + + cfg = s3c_cpufreq_getconfig(); + if (!cfg) { + seq_printf(seq, "no configuration registered\n"); + return 0; + } + + brd = cfg->board; + if (!brd) { + seq_printf(seq, "no board definition set?\n"); + return 0; + } + + seq_printf(seq, "SDRAM refresh %u ns\n", brd->refresh); + seq_printf(seq, "auto_io=%u\n", brd->auto_io); + seq_printf(seq, "need_io=%u\n", brd->need_io); + + show_max(seq, &brd->max); + + return 0; +} + +static int fops_board_open(struct inode *inode, struct file *file) +{ + return single_open(file, board_show, NULL); +} + +static const struct file_operations fops_board = { + .open = fops_board_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int info_show(struct seq_file *seq, void *p) +{ + struct s3c_cpufreq_config *cfg; + + cfg = s3c_cpufreq_getconfig(); + if (!cfg) { + seq_printf(seq, "no configuration registered\n"); + return 0; + } + + seq_printf(seq, " FCLK %ld Hz\n", cfg->freq.fclk); + seq_printf(seq, " HCLK %ld Hz (%lu.%lu ns)\n", + cfg->freq.hclk, print_ns(cfg->freq.hclk_tns)); + seq_printf(seq, " PCLK %ld Hz\n", cfg->freq.pclk); + seq_printf(seq, "ARMCLK %ld Hz\n", cfg->freq.armclk); + seq_printf(seq, "\n"); + + show_max(seq, &cfg->max); + + seq_printf(seq, "Divisors: H=%d, P=%d, A=%d, dvs=%s\n", + cfg->divs.h_divisor, cfg->divs.p_divisor, + cfg->divs.arm_divisor, cfg->divs.dvs ?
"on" : "off"); + seq_printf(seq, "\n"); + + seq_printf(seq, "lock_pll=%u\n", cfg->lock_pll); + + return 0; +} + +static int fops_info_open(struct inode *inode, struct file *file) +{ + return single_open(file, info_show, NULL); +} + +static const struct file_operations fops_info = { + .open = fops_info_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int io_show(struct seq_file *seq, void *p) +{ + void (*show_bank)(struct seq_file *, struct s3c_cpufreq_config *, union s3c_iobank *); + struct s3c_cpufreq_config *cfg; + struct s3c_iotimings *iot; + union s3c_iobank *iob; + int bank; + + cfg = s3c_cpufreq_getconfig(); + if (!cfg) { + seq_printf(seq, "no configuration registered\n"); + return 0; + } + + show_bank = cfg->info->debug_io_show; + if (!show_bank) { + seq_printf(seq, "no code to show bank timing\n"); + return 0; + } + + iot = s3c_cpufreq_getiotimings(); + if (!iot) { + seq_printf(seq, "no io timings registered\n"); + return 0; + } + + seq_printf(seq, "hclk period is %lu.%lu ns\n", print_ns(cfg->freq.hclk_tns)); + + for (bank = 0; bank < MAX_BANKS; bank++) { + iob = &iot->bank[bank]; + + seq_printf(seq, "bank %d: ", bank); + + if (!iob->io_2410) { + seq_printf(seq, "nothing set\n"); + continue; + } + + show_bank(seq, cfg, iob); + } + + return 0; +} + +static int fops_io_open(struct inode *inode, struct file *file) +{ + return single_open(file, io_show, NULL); +} + +static const struct file_operations fops_io = { + .open = fops_io_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + + +static int __init s3c_freq_debugfs_init(void) +{ + dbgfs_root = debugfs_create_dir("s3c-cpufreq", NULL); + if (IS_ERR(dbgfs_root)) { + printk(KERN_ERR "%s: error creating debugfs root\n", __func__); + return PTR_ERR(dbgfs_root); + } + + dbgfs_file_io = debugfs_create_file("io-timing", S_IRUGO, dbgfs_root, + NULL, &fops_io); + + dbgfs_file_info = debugfs_create_file("info", S_IRUGO, dbgfs_root, + NULL, &fops_info); + + dbgfs_file_board = debugfs_create_file("board", S_IRUGO, dbgfs_root, + NULL, &fops_board); + + return 0; +} + +late_initcall(s3c_freq_debugfs_init); + diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c new file mode 100644 index 00000000000..227ebf7c1ee --- /dev/null +++ b/drivers/cpufreq/s3c24xx-cpufreq.c @@ -0,0 +1,683 @@ +/* + * Copyright (c) 2006-2008 Simtec Electronics + * http://armlinux.simtec.co.uk/ + * Ben Dooks <ben@simtec.co.uk> + * + * S3C24XX CPU Frequency scaling + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+*/ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/cpufreq.h> +#include <linux/cpu.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/device.h> +#include <linux/sysfs.h> +#include <linux/slab.h> + +#include <asm/mach/arch.h> +#include <asm/mach/map.h> + +#include <plat/cpu.h> +#include <plat/clock.h> +#include <plat/cpu-freq-core.h> + +#include <mach/regs-clock.h> + +/* note, cpufreq support deals in kHz, not Hz */ + +static struct cpufreq_driver s3c24xx_driver; +static struct s3c_cpufreq_config cpu_cur; +static struct s3c_iotimings s3c24xx_iotiming; +static struct cpufreq_frequency_table *pll_reg; +static unsigned int last_target = ~0; +static unsigned int ftab_size; +static struct cpufreq_frequency_table *ftab; + +static struct clk *_clk_mpll; +static struct clk *_clk_xtal; +static struct clk *clk_fclk; +static struct clk *clk_hclk; +static struct clk *clk_pclk; +static struct clk *clk_arm; + +#ifdef CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS +struct s3c_cpufreq_config *s3c_cpufreq_getconfig(void) +{ + return &cpu_cur; +} + +struct s3c_iotimings *s3c_cpufreq_getiotimings(void) +{ + return &s3c24xx_iotiming; +} +#endif /* CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS */ + +static void s3c_cpufreq_getcur(struct s3c_cpufreq_config *cfg) +{ + unsigned long fclk, pclk, hclk, armclk; + + cfg->freq.fclk = fclk = clk_get_rate(clk_fclk); + cfg->freq.hclk = hclk = clk_get_rate(clk_hclk); + cfg->freq.pclk = pclk = clk_get_rate(clk_pclk); + cfg->freq.armclk = armclk = clk_get_rate(clk_arm); + + cfg->pll.driver_data = __raw_readl(S3C2410_MPLLCON); + cfg->pll.frequency = fclk; + + cfg->freq.hclk_tns = 1000000000 / (cfg->freq.hclk / 10); + + cfg->divs.h_divisor = fclk / hclk; + cfg->divs.p_divisor = fclk / pclk; +} + +static inline void s3c_cpufreq_calc(struct s3c_cpufreq_config *cfg) +{ + unsigned long pll = cfg->pll.frequency; + + cfg->freq.fclk = pll; + cfg->freq.hclk = pll / cfg->divs.h_divisor; + cfg->freq.pclk = pll / cfg->divs.p_divisor; + + /* convert hclk into 10ths of nanoseconds for io calcs */ + cfg->freq.hclk_tns = 1000000000 / (cfg->freq.hclk / 10); +} + +static inline int closer(unsigned int target, unsigned int n, unsigned int c) +{ + int diff_cur = abs(target - c); + int diff_new = abs(target - n); + + return (diff_new < diff_cur); +} + +static void s3c_cpufreq_show(const char *pfx, + struct s3c_cpufreq_config *cfg) +{ + s3c_freq_dbg("%s: Fvco=%u, F=%lu, A=%lu, H=%lu (%u), P=%lu (%u)\n", + pfx, cfg->pll.frequency, cfg->freq.fclk, cfg->freq.armclk, + cfg->freq.hclk, cfg->divs.h_divisor, + cfg->freq.pclk, cfg->divs.p_divisor); +} + +/* functions to wrap the driver info calls to do the cpu specific work */ + +static void s3c_cpufreq_setio(struct s3c_cpufreq_config *cfg) +{ + if (cfg->info->set_iotiming) + (cfg->info->set_iotiming)(cfg, &s3c24xx_iotiming); +} + +static int s3c_cpufreq_calcio(struct s3c_cpufreq_config *cfg) +{ + if (cfg->info->calc_iotiming) + return (cfg->info->calc_iotiming)(cfg, &s3c24xx_iotiming); + + return 0; +} + +static void s3c_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg) +{ + (cfg->info->set_refresh)(cfg); +} + +static void s3c_cpufreq_setdivs(struct s3c_cpufreq_config *cfg) +{ + (cfg->info->set_divs)(cfg); +} + +static int s3c_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg) +{ + return (cfg->info->calc_divs)(cfg); +} + +static void s3c_cpufreq_setfvco(struct s3c_cpufreq_config *cfg) +{ + cfg->mpll = _clk_mpll; + (cfg->info->set_fvco)(cfg); +} + +static
inline void s3c_cpufreq_resume_clocks(void) +{ + cpu_cur.info->resume_clocks(); +} + +static inline void s3c_cpufreq_updateclk(struct clk *clk, + unsigned int freq) +{ + clk_set_rate(clk, freq); +} + +static int s3c_cpufreq_settarget(struct cpufreq_policy *policy, + unsigned int target_freq, + struct cpufreq_frequency_table *pll) +{ + struct s3c_cpufreq_freqs freqs; + struct s3c_cpufreq_config cpu_new; + unsigned long flags; + + cpu_new = cpu_cur; /* copy new from current */ + + s3c_cpufreq_show("cur", &cpu_cur); + + /* TODO - check for DMA currently outstanding */ + + cpu_new.pll = pll ? *pll : cpu_cur.pll; + + if (pll) + freqs.pll_changing = 1; + + /* update our frequencies */ + + cpu_new.freq.armclk = target_freq; + cpu_new.freq.fclk = cpu_new.pll.frequency; + + if (s3c_cpufreq_calcdivs(&cpu_new) < 0) { + printk(KERN_ERR "no divisors for %d\n", target_freq); + goto err_notpossible; + } + + s3c_freq_dbg("%s: got divs\n", __func__); + + s3c_cpufreq_calc(&cpu_new); + + s3c_freq_dbg("%s: calculated frequencies for new\n", __func__); + + if (cpu_new.freq.hclk != cpu_cur.freq.hclk) { + if (s3c_cpufreq_calcio(&cpu_new) < 0) { + printk(KERN_ERR "%s: no IO timings\n", __func__); + goto err_notpossible; + } + } + + s3c_cpufreq_show("new", &cpu_new); + + /* setup our cpufreq parameters */ + + freqs.old = cpu_cur.freq; + freqs.new = cpu_new.freq; + + freqs.freqs.old = cpu_cur.freq.armclk / 1000; + freqs.freqs.new = cpu_new.freq.armclk / 1000; + + /* update f/h/p clock settings before we issue the change + * notification, so that drivers do not need to do anything + * special if they want to recalculate on CPUFREQ_PRECHANGE. */ + + s3c_cpufreq_updateclk(_clk_mpll, cpu_new.pll.frequency); + s3c_cpufreq_updateclk(clk_fclk, cpu_new.freq.fclk); + s3c_cpufreq_updateclk(clk_hclk, cpu_new.freq.hclk); + s3c_cpufreq_updateclk(clk_pclk, cpu_new.freq.pclk); + + /* start the frequency change */ + cpufreq_freq_transition_begin(policy, &freqs.freqs); + + /* If hclk is staying the same, then we do not need to + * re-write the IO or the refresh timings whilst we are changing + * speed. */ + + local_irq_save(flags); + + /* is our memory clock slowing down? */ + if (cpu_new.freq.hclk < cpu_cur.freq.hclk) { + s3c_cpufreq_setrefresh(&cpu_new); + s3c_cpufreq_setio(&cpu_new); + } + + if (cpu_new.freq.fclk == cpu_cur.freq.fclk) { + /* not changing PLL, just set the divisors */ + + s3c_cpufreq_setdivs(&cpu_new); + } else { + if (cpu_new.freq.fclk < cpu_cur.freq.fclk) { + /* slow the cpu down, then set divisors */ + + s3c_cpufreq_setfvco(&cpu_new); + s3c_cpufreq_setdivs(&cpu_new); + } else { + /* set the divisors, then speed up */ + + s3c_cpufreq_setdivs(&cpu_new); + s3c_cpufreq_setfvco(&cpu_new); + } + } + + /* did our memory clock speed up */ + if (cpu_new.freq.hclk > cpu_cur.freq.hclk) { + s3c_cpufreq_setrefresh(&cpu_new); + s3c_cpufreq_setio(&cpu_new); + } + + /* update our current settings */ + cpu_cur = cpu_new; + + local_irq_restore(flags); + + /* notify everyone we've done this */ + cpufreq_freq_transition_end(policy, &freqs.freqs, 0); + + s3c_freq_dbg("%s: finished\n", __func__); + return 0; + + err_notpossible: + printk(KERN_ERR "no compatible settings for %d\n", target_freq); + return -EINVAL; +} + +/* s3c_cpufreq_target + * + * called by the cpufreq core to adjust the frequency that the CPU + * is currently running at. 
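+ * + * In outline: the request is first snapped to an entry of the board + * frequency table (ftab) when one has been built, then, unless the PLL + * is locked, a matching PLL setting is picked from pll_reg, and the + * resulting target is handed to s3c_cpufreq_settarget() to do the work.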
+ */ + +static int s3c_cpufreq_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + struct cpufreq_frequency_table *pll; + unsigned int index; + + /* avoid repeated calls which cause a needless amount of duplicated + * logging output (and CPU time as the calculation process is + * done) */ + if (target_freq == last_target) + return 0; + + last_target = target_freq; + + s3c_freq_dbg("%s: policy %p, target %u, relation %u\n", + __func__, policy, target_freq, relation); + + if (ftab) { + if (cpufreq_frequency_table_target(policy, ftab, + target_freq, relation, + &index)) { + s3c_freq_dbg("%s: table failed\n", __func__); + return -EINVAL; + } + + s3c_freq_dbg("%s: adjust %d to entry %d (%u)\n", __func__, + target_freq, index, ftab[index].frequency); + target_freq = ftab[index].frequency; + } + + target_freq *= 1000; /* convert target to Hz */ + + /* find the settings for our new frequency */ + + if (!pll_reg || cpu_cur.lock_pll) { + /* either we've not got any PLL values, or we've locked + * to the current one. */ + pll = NULL; + } else { + struct cpufreq_policy tmp_policy; + int ret; + + /* we keep the cpu pll table in Hz, to ensure we get an + * accurate value for the PLL output. */ + + tmp_policy.min = policy->min * 1000; + tmp_policy.max = policy->max * 1000; + tmp_policy.cpu = policy->cpu; + + /* cpufreq_frequency_table_target uses a pointer to 'index' + * which is the number of the table entry, not the value of + * the table entry's index field. */ + + ret = cpufreq_frequency_table_target(&tmp_policy, pll_reg, + target_freq, relation, + &index); + + if (ret < 0) { + printk(KERN_ERR "%s: no PLL available\n", __func__); + goto err_notpossible; + } + + pll = pll_reg + index; + + s3c_freq_dbg("%s: target %u => %u\n", + __func__, target_freq, pll->frequency); + + target_freq = pll->frequency; + } + + return s3c_cpufreq_settarget(policy, target_freq, pll); + + err_notpossible: + printk(KERN_ERR "no compatible settings for %d\n", target_freq); + return -EINVAL; +} + +struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name) +{ + struct clk *clk; + + clk = clk_get(dev, name); + if (IS_ERR(clk)) + printk(KERN_ERR "cpufreq: failed to get clock '%s'\n", name); + + return clk; +} + +static int s3c_cpufreq_init(struct cpufreq_policy *policy) +{ + policy->clk = clk_arm; + return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency); +} + +static int __init s3c_cpufreq_initclks(void) +{ + _clk_mpll = s3c_cpufreq_clk_get(NULL, "mpll"); + _clk_xtal = s3c_cpufreq_clk_get(NULL, "xtal"); + clk_fclk = s3c_cpufreq_clk_get(NULL, "fclk"); + clk_hclk = s3c_cpufreq_clk_get(NULL, "hclk"); + clk_pclk = s3c_cpufreq_clk_get(NULL, "pclk"); + clk_arm = s3c_cpufreq_clk_get(NULL, "armclk"); + + if (IS_ERR(clk_fclk) || IS_ERR(clk_hclk) || IS_ERR(clk_pclk) || + IS_ERR(_clk_mpll) || IS_ERR(clk_arm) || IS_ERR(_clk_xtal)) { + printk(KERN_ERR "%s: could not get clock(s)\n", __func__); + return -ENOENT; + } + + printk(KERN_INFO "%s: clocks f=%lu,h=%lu,p=%lu,a=%lu\n", __func__, + clk_get_rate(clk_fclk) / 1000, + clk_get_rate(clk_hclk) / 1000, + clk_get_rate(clk_pclk) / 1000, + clk_get_rate(clk_arm) / 1000); + + return 0; +} + +#ifdef CONFIG_PM +static struct cpufreq_frequency_table suspend_pll; +static unsigned int suspend_freq; + +static int s3c_cpufreq_suspend(struct cpufreq_policy *policy) +{ + suspend_pll.frequency = clk_get_rate(_clk_mpll); + suspend_pll.driver_data = __raw_readl(S3C2410_MPLLCON); + suspend_freq = clk_get_rate(clk_arm); + + return 0; +} + +static int
s3c_cpufreq_resume(struct cpufreq_policy *policy) +{ + int ret; + + s3c_freq_dbg("%s: resuming with policy %p\n", __func__, policy); + + last_target = ~0; /* invalidate last_target setting */ + + /* first, find out what speed we resumed at. */ + s3c_cpufreq_resume_clocks(); + + /* whilst we will be called later on, we try and re-set the + * cpu frequencies as soon as possible so that we do not end + * up resuming devices and then immediately having to re-set + * a number of settings once these devices have restarted. + * + * as a note, it is expected devices are not used until they + * have been un-suspended and at that time they should have + * used the updated clock settings. + */ + + ret = s3c_cpufreq_settarget(NULL, suspend_freq, &suspend_pll); + if (ret) { + printk(KERN_ERR "%s: failed to reset pll/freq\n", __func__); + return ret; + } + + return 0; +} +#else +#define s3c_cpufreq_resume NULL +#define s3c_cpufreq_suspend NULL +#endif + +static struct cpufreq_driver s3c24xx_driver = { + .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .target = s3c_cpufreq_target, + .get = cpufreq_generic_get, + .init = s3c_cpufreq_init, + .suspend = s3c_cpufreq_suspend, + .resume = s3c_cpufreq_resume, + .name = "s3c24xx", +}; + + +int __init s3c_cpufreq_register(struct s3c_cpufreq_info *info) +{ + if (!info || !info->name) { + printk(KERN_ERR "%s: failed to pass valid information\n", + __func__); + return -EINVAL; + } + + printk(KERN_INFO "S3C24XX CPU Frequency driver, %s cpu support\n", + info->name); + + /* check our driver info has valid data */ + + BUG_ON(info->set_refresh == NULL); + BUG_ON(info->set_divs == NULL); + BUG_ON(info->calc_divs == NULL); + + /* info->set_fvco is optional, depending on whether there + * is a need to set the clock code. */ + + cpu_cur.info = info; + + /* Note, driver registering should probably update locktime */ + + return 0; +} + +int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board) +{ + struct s3c_cpufreq_board *ours; + + if (!board) { + printk(KERN_INFO "%s: no board data\n", __func__); + return -EINVAL; + } + + /* Copy the board information so that each board can make this + * initdata. */ + + ours = kzalloc(sizeof(*ours), GFP_KERNEL); + if (ours == NULL) { + printk(KERN_ERR "%s: no memory\n", __func__); + return -ENOMEM; + } + + *ours = *board; + cpu_cur.board = ours; + + return 0; +} + +static int __init s3c_cpufreq_auto_io(void) +{ + int ret; + + if (!cpu_cur.info->get_iotiming) { + printk(KERN_ERR "%s: get_iotiming undefined\n", __func__); + return -ENOENT; + } + + printk(KERN_INFO "%s: working out IO settings\n", __func__); + + ret = (cpu_cur.info->get_iotiming)(&cpu_cur, &s3c24xx_iotiming); + if (ret) + printk(KERN_ERR "%s: failed to get timings\n", __func__); + + return ret; +} + +/* if one of the two is zero, return the other, otherwise return the min */ +#define do_min(_a, _b) ((_a) == 0 ? (_b) : (_b) == 0 ? (_a) : min(_a, _b)) + +/** + * s3c_cpufreq_freq_min - find the minimum settings for the given freq. + * @dst: The destination structure + * @a: One argument. + * @b: The other argument. + * + * Create a minimum of each frequency entry in the 'struct s3c_freq', + * unless an entry is zero, in which case it is ignored and the + * non-zero argument is used.
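+ * + * For instance, do_min(400000000, 0) evaluates to 400000000, while + * do_min(400000000, 266000000) evaluates to 266000000, so a zero + * (unset) limit never masks a real one.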
+ */ +static void s3c_cpufreq_freq_min(struct s3c_freq *dst, + struct s3c_freq *a, struct s3c_freq *b) +{ + dst->fclk = do_min(a->fclk, b->fclk); + dst->hclk = do_min(a->hclk, b->hclk); + dst->pclk = do_min(a->pclk, b->pclk); + dst->armclk = do_min(a->armclk, b->armclk); +} + +static inline u32 calc_locktime(u32 freq, u32 time_us) +{ + u32 result; + + result = freq * time_us; + result = DIV_ROUND_UP(result, 1000 * 1000); + + return result; +} + +static void s3c_cpufreq_update_loctkime(void) +{ + unsigned int bits = cpu_cur.info->locktime_bits; + u32 rate = (u32)clk_get_rate(_clk_xtal); + u32 val; + + if (bits == 0) { + WARN_ON(1); + return; + } + + val = calc_locktime(rate, cpu_cur.info->locktime_u) << bits; + val |= calc_locktime(rate, cpu_cur.info->locktime_m); + + printk(KERN_INFO "%s: new locktime is 0x%08x\n", __func__, val); + __raw_writel(val, S3C2410_LOCKTIME); +} + +static int s3c_cpufreq_build_freq(void) +{ + int size, ret; + + if (!cpu_cur.info->calc_freqtable) + return -EINVAL; + + kfree(ftab); + ftab = NULL; + + size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0); + size++; + + ftab = kzalloc(sizeof(*ftab) * size, GFP_KERNEL); + if (!ftab) { + printk(KERN_ERR "%s: no memory for tables\n", __func__); + return -ENOMEM; + } + + ftab_size = size; + + ret = cpu_cur.info->calc_freqtable(&cpu_cur, ftab, size); + s3c_cpufreq_addfreq(ftab, ret, size, CPUFREQ_TABLE_END); + + return 0; +} + +static int __init s3c_cpufreq_initcall(void) +{ + int ret = 0; + + if (cpu_cur.info && cpu_cur.board) { + ret = s3c_cpufreq_initclks(); + if (ret) + goto out; + + /* get current settings */ + s3c_cpufreq_getcur(&cpu_cur); + s3c_cpufreq_show("cur", &cpu_cur); + + if (cpu_cur.board->auto_io) { + ret = s3c_cpufreq_auto_io(); + if (ret) { + printk(KERN_ERR "%s: failed to get io timing\n", + __func__); + goto out; + } + } + + if (cpu_cur.board->need_io && !cpu_cur.info->set_iotiming) { + printk(KERN_ERR "%s: no IO support registered\n", + __func__); + ret = -EINVAL; + goto out; + } + + if (!cpu_cur.info->need_pll) + cpu_cur.lock_pll = 1; + + s3c_cpufreq_update_loctkime(); + + s3c_cpufreq_freq_min(&cpu_cur.max, &cpu_cur.board->max, + &cpu_cur.info->max); + + if (cpu_cur.info->calc_freqtable) + s3c_cpufreq_build_freq(); + + ret = cpufreq_register_driver(&s3c24xx_driver); + } + + out: + return ret; +} + +late_initcall(s3c_cpufreq_initcall); + +/** + * s3c_plltab_register - register CPU PLL table. + * @plls: The list of PLL entries. + * @plls_no: The size of the PLL entries @plls. + * + * Register the given set of PLLs with the system. + */ +int __init s3c_plltab_register(struct cpufreq_frequency_table *plls, + unsigned int plls_no) +{ + struct cpufreq_frequency_table *vals; + unsigned int size; + + size = sizeof(*vals) * (plls_no + 1); + + vals = kzalloc(size, GFP_KERNEL); + if (vals) { + memcpy(vals, plls, size); + pll_reg = vals; + + /* write a terminating entry, we don't store it in the + * table that is stored in the kernel */ + vals += plls_no; + vals->frequency = CPUFREQ_TABLE_END; + + printk(KERN_INFO "cpufreq: %d PLL entries\n", plls_no); + } else + printk(KERN_ERR "cpufreq: no memory for PLL tables\n"); + + return vals ? 
0 : -ENOMEM; +} diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c index 27cacb52479..176e84cc399 100644 --- a/drivers/cpufreq/s3c64xx-cpufreq.c +++ b/drivers/cpufreq/s3c64xx-cpufreq.c @@ -19,7 +19,6 @@ #include <linux/regulator/consumer.h> #include <linux/module.h> -static struct clk *armclk; static struct regulator *vddarm; static unsigned long regulator_latency; @@ -38,111 +37,73 @@ static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = { }; static struct cpufreq_frequency_table s3c64xx_freq_table[] = { - { 0, 66000 }, - { 0, 100000 }, - { 0, 133000 }, - { 1, 200000 }, - { 1, 222000 }, - { 1, 266000 }, - { 2, 333000 }, - { 2, 400000 }, - { 2, 532000 }, - { 2, 533000 }, - { 3, 667000 }, - { 4, 800000 }, - { 0, CPUFREQ_TABLE_END }, + { 0, 0, 66000 }, + { 0, 0, 100000 }, + { 0, 0, 133000 }, + { 0, 1, 200000 }, + { 0, 1, 222000 }, + { 0, 1, 266000 }, + { 0, 2, 333000 }, + { 0, 2, 400000 }, + { 0, 2, 532000 }, + { 0, 2, 533000 }, + { 0, 3, 667000 }, + { 0, 4, 800000 }, + { 0, 0, CPUFREQ_TABLE_END }, }; #endif -static int s3c64xx_cpufreq_verify_speed(struct cpufreq_policy *policy) -{ - if (policy->cpu != 0) - return -EINVAL; - - return cpufreq_frequency_table_verify(policy, s3c64xx_freq_table); -} - -static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu) -{ - if (cpu != 0) - return 0; - - return clk_get_rate(armclk) / 1000; -} - static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) + unsigned int index) { - int ret; - unsigned int i; - struct cpufreq_freqs freqs; struct s3c64xx_dvfs *dvfs; + unsigned int old_freq, new_freq; + int ret; - ret = cpufreq_frequency_table_target(policy, s3c64xx_freq_table, - target_freq, relation, &i); - if (ret != 0) - return ret; - - freqs.old = clk_get_rate(armclk) / 1000; - freqs.new = s3c64xx_freq_table[i].frequency; - freqs.flags = 0; - dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[i].index]; - - if (freqs.old == freqs.new) - return 0; - - pr_debug("Transition %d-%dkHz\n", freqs.old, freqs.new); - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + old_freq = clk_get_rate(policy->clk) / 1000; + new_freq = s3c64xx_freq_table[index].frequency; + dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[index].driver_data]; #ifdef CONFIG_REGULATOR - if (vddarm && freqs.new > freqs.old) { + if (vddarm && new_freq > old_freq) { ret = regulator_set_voltage(vddarm, dvfs->vddarm_min, dvfs->vddarm_max); if (ret != 0) { pr_err("Failed to set VDDARM for %dkHz: %d\n", - freqs.new, ret); - goto err; + new_freq, ret); + return ret; } } #endif - ret = clk_set_rate(armclk, freqs.new * 1000); + ret = clk_set_rate(policy->clk, new_freq * 1000); if (ret < 0) { pr_err("Failed to set rate %dkHz: %d\n", - freqs.new, ret); - goto err; + new_freq, ret); + return ret; } - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - #ifdef CONFIG_REGULATOR - if (vddarm && freqs.new < freqs.old) { + if (vddarm && new_freq < old_freq) { ret = regulator_set_voltage(vddarm, dvfs->vddarm_min, dvfs->vddarm_max); if (ret != 0) { pr_err("Failed to set VDDARM for %dkHz: %d\n", - freqs.new, ret); - goto err_clk; + new_freq, ret); + if (clk_set_rate(policy->clk, old_freq * 1000) < 0) + pr_err("Failed to restore original clock rate\n"); + + return ret; } } #endif pr_debug("Set actual frequency %lukHz\n", - clk_get_rate(armclk) / 1000); + clk_get_rate(policy->clk) / 1000); return 0; - -err_clk: - if (clk_set_rate(armclk, freqs.old * 1000) < 0) - pr_err("Failed to restore original clock 
-err:
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
-	return ret;
 }
 
 #ifdef CONFIG_REGULATOR
@@ -157,12 +118,11 @@ static void __init s3c64xx_cpufreq_config_regulator(void)
 		pr_err("Unable to check supported voltages\n");
 	}
 
-	freq = s3c64xx_freq_table;
-	while (count > 0 && freq->frequency != CPUFREQ_TABLE_END) {
-		if (freq->frequency == CPUFREQ_ENTRY_INVALID)
-			continue;
+	if (!count)
+		goto out;
 
-		dvfs = &s3c64xx_dvfs_table[freq->index];
+	cpufreq_for_each_valid_entry(freq, s3c64xx_freq_table) {
+		dvfs = &s3c64xx_dvfs_table[freq->driver_data];
 		found = 0;
 
 		for (i = 0; i < count; i++) {
@@ -176,10 +136,9 @@ static void __init s3c64xx_cpufreq_config_regulator(void)
 				freq->frequency);
 			freq->frequency = CPUFREQ_ENTRY_INVALID;
 		}
-
-		freq++;
 	}
 
+out:
 	/* Guess based on having to do an I2C/SPI write; in future we
 	 * will be able to query the regulator performance here. */
 	regulator_latency = 1 * 1000 * 1000;
@@ -199,11 +158,11 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
 		return -ENODEV;
 	}
 
-	armclk = clk_get(NULL, "armclk");
-	if (IS_ERR(armclk)) {
+	policy->clk = clk_get(NULL, "armclk");
+	if (IS_ERR(policy->clk)) {
 		pr_err("Unable to obtain ARMCLK: %ld\n",
-		       PTR_ERR(armclk));
-		return PTR_ERR(armclk);
+		       PTR_ERR(policy->clk));
+		return PTR_ERR(policy->clk);
 	}
 
 #ifdef CONFIG_REGULATOR
@@ -218,12 +177,11 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
 	}
 #endif
 
-	freq = s3c64xx_freq_table;
-	while (freq->frequency != CPUFREQ_TABLE_END) {
+	cpufreq_for_each_entry(freq, s3c64xx_freq_table) {
 		unsigned long r;
 
 		/* Check for frequencies we can generate */
-		r = clk_round_rate(armclk, freq->frequency * 1000);
+		r = clk_round_rate(policy->clk, freq->frequency * 1000);
 		r /= 1000;
 		if (r != freq->frequency) {
 			pr_debug("%dkHz unsupported by clock\n",
@@ -233,37 +191,31 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
 
 		/* If we have no regulator then assume startup
 		 * frequency is the maximum we can support. */
-		if (!vddarm && freq->frequency > s3c64xx_cpufreq_get_speed(0))
+		if (!vddarm && freq->frequency > clk_get_rate(policy->clk) / 1000)
 			freq->frequency = CPUFREQ_ENTRY_INVALID;
-
-		freq++;
 	}
 
-	policy->cur = clk_get_rate(armclk) / 1000;
-
 	/* Datasheet says PLL stabalisation time (if we were to use
 	 * the PLLs, which we don't currently) is ~300us worst case,
 	 * but add some fudge.
	 */
-	policy->cpuinfo.transition_latency = (500 * 1000) + regulator_latency;
-
-	ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table);
+	ret = cpufreq_generic_init(policy, s3c64xx_freq_table,
+				   (500 * 1000) + regulator_latency);
 	if (ret != 0) {
 		pr_err("Failed to configure frequency table: %d\n",
 		       ret);
 		regulator_put(vddarm);
-		clk_put(armclk);
+		clk_put(policy->clk);
 	}
 
 	return ret;
 }
 
 static struct cpufreq_driver s3c64xx_cpufreq_driver = {
-	.owner		= THIS_MODULE,
-	.flags		= 0,
-	.verify		= s3c64xx_cpufreq_verify_speed,
-	.target		= s3c64xx_cpufreq_set_target,
-	.get		= s3c64xx_cpufreq_get_speed,
+	.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= s3c64xx_cpufreq_set_target,
+	.get		= cpufreq_generic_get,
 	.init		= s3c64xx_cpufreq_driver_init,
 	.name		= "s3c",
 };
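The conversion pattern above repeats throughout this series: cpufreq_generic_init() replaces the per-driver setup of policy->cur, the cpuinfo limits and the sysfs attr registration, while cpufreq_generic_get() reads the rate of policy->clk. A minimal sketch of a converted init callback — the "foo" driver and its "armclk" clock lookup are hypothetical:

#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/err.h>

static struct cpufreq_frequency_table foo_freq_table[] = {
	{ 0, 0, 200000 },
	{ 0, 1, 400000 },
	{ 0, 0, CPUFREQ_TABLE_END },
};

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	/* cpufreq_generic_get() returns clk_get_rate(policy->clk) / 1000 */
	policy->clk = clk_get(NULL, "armclk");
	if (IS_ERR(policy->clk))
		return PTR_ERR(policy->clk);

	/* validates the table and fills in cpuinfo + transition latency */
	return cpufreq_generic_init(policy, foo_freq_table, 100 * 1000);
}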
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 5c775707379..19a10b89fef 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -18,15 +18,12 @@
 #include <linux/cpufreq.h>
 #include <linux/reboot.h>
 #include <linux/regulator/consumer.h>
-#include <linux/suspend.h>
 
 #include <mach/map.h>
 #include <mach/regs-clock.h>
 
-static struct clk *cpu_clk;
 static struct clk *dmc0_clk;
 static struct clk *dmc1_clk;
-static struct cpufreq_freqs freqs;
 static DEFINE_MUTEX(set_freq_lock);
 
 /* APLL M,P,S values for 1G/800Mhz */
@@ -36,16 +33,7 @@ static DEFINE_MUTEX(set_freq_lock);
 /* Use 800MHz when entering sleep mode */
 #define SLEEP_FREQ	(800 * 1000)
 
-/*
- * relation has an additional symantics other than the standard of cpufreq
- * DISALBE_FURTHER_CPUFREQ: disable further access to target
- * ENABLE_FURTUER_CPUFREQ: enable access to target
- */
-enum cpufreq_access {
-	DISABLE_FURTHER_CPUFREQ = 0x10,
-	ENABLE_FURTHER_CPUFREQ = 0x20,
-};
-
+/* Tracks if cpu freqency can be updated anymore */
 static bool no_cpufreq_access;
 
 /*
@@ -76,12 +64,12 @@ enum s5pv210_dmc_port {
 };
 
 static struct cpufreq_frequency_table s5pv210_freq_table[] = {
-	{L0, 1000*1000},
-	{L1, 800*1000},
-	{L2, 400*1000},
-	{L3, 200*1000},
-	{L4, 100*1000},
-	{0, CPUFREQ_TABLE_END},
+	{0, L0, 1000*1000},
+	{0, L1, 800*1000},
+	{0, L2, 400*1000},
+	{0, L3, 200*1000},
+	{0, L4, 100*1000},
+	{0, 0, CPUFREQ_TABLE_END},
 };
 
 static struct regulator *arm_regulator;
@@ -174,68 +162,32 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
 	__raw_writel(tmp1, reg);
 }
 
-static int s5pv210_verify_speed(struct cpufreq_policy *policy)
-{
-	if (policy->cpu)
-		return -EINVAL;
-
-	return cpufreq_frequency_table_verify(policy, s5pv210_freq_table);
-}
-
-static unsigned int s5pv210_getspeed(unsigned int cpu)
-{
-	if (cpu)
-		return 0;
-
-	return clk_get_rate(cpu_clk) / 1000;
-}
-
-static int s5pv210_target(struct cpufreq_policy *policy,
-			  unsigned int target_freq,
-			  unsigned int relation)
+static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
 {
 	unsigned long reg;
-	unsigned int index, priv_index;
+	unsigned int priv_index;
 	unsigned int pll_changing = 0;
 	unsigned int bus_speed_changing = 0;
+	unsigned int old_freq, new_freq;
 	int arm_volt, int_volt;
 	int ret = 0;
 
 	mutex_lock(&set_freq_lock);
 
-	if (relation & ENABLE_FURTHER_CPUFREQ)
-		no_cpufreq_access = false;
-
 	if (no_cpufreq_access) {
-#ifdef CONFIG_PM_VERBOSE
-		pr_err("%s:%d denied access to %s as it is disabled"
-			"temporarily\n", __FILE__, __LINE__, __func__);
-#endif
-		ret = -EINVAL;
-		goto exit;
-	}
-
-	if (relation & DISABLE_FURTHER_CPUFREQ)
-		no_cpufreq_access = true;
-
-	relation &= ~(ENABLE_FURTHER_CPUFREQ | DISABLE_FURTHER_CPUFREQ);
-
-	freqs.old = s5pv210_getspeed(0);
-
-	if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
-					   target_freq, relation, &index)) {
+		pr_err("Denied access to %s as it is disabled temporarily\n",
+		       __func__);
 		ret = -EINVAL;
 		goto exit;
 	}
 
-	freqs.new = s5pv210_freq_table[index].frequency;
-
-	if (freqs.new == freqs.old)
-		goto exit;
+	old_freq = policy->cur;
+	new_freq = s5pv210_freq_table[index].frequency;
 
 	/* Finding current running level index */
 	if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
-					   freqs.old, relation, &priv_index)) {
+					   old_freq, CPUFREQ_RELATION_H,
+					   &priv_index)) {
 		ret = -EINVAL;
 		goto exit;
 	}
@@ -243,7 +195,7 @@ static int s5pv210_target(struct cpufreq_policy *policy,
 	arm_volt = dvs_conf[index].arm_volt;
 	int_volt = dvs_conf[index].int_volt;
 
-	if (freqs.new > freqs.old) {
+	if (new_freq > old_freq) {
 		ret = regulator_set_voltage(arm_regulator,
 					    arm_volt, arm_volt_max);
 		if (ret)
@@ -255,8 +207,6 @@ static int s5pv210_target(struct cpufreq_policy *policy,
 			goto exit;
 	}
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
 	/* Check if there need to change PLL */
 	if ((index == L0) || (priv_index == L0))
 		pll_changing = 1;
@@ -467,9 +417,7 @@ static int s5pv210_target(struct cpufreq_policy *policy,
 		}
 	}
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
-	if (freqs.new < freqs.old) {
+	if (new_freq < old_freq) {
 		regulator_set_voltage(int_regulator,
 				      int_volt, int_volt_max);
 
@@ -484,18 +432,6 @@ exit:
 	return ret;
 }
 
-#ifdef CONFIG_PM
-static int s5pv210_cpufreq_suspend(struct cpufreq_policy *policy)
-{
-	return 0;
-}
-
-static int s5pv210_cpufreq_resume(struct cpufreq_policy *policy)
-{
-	return 0;
-}
-#endif
-
 static int check_mem_type(void __iomem *dmc_reg)
 {
 	unsigned long val;
@@ -511,9 +447,9 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
 	unsigned long mem_type;
 	int ret;
 
-	cpu_clk = clk_get(NULL, "armclk");
-	if (IS_ERR(cpu_clk))
-		return PTR_ERR(cpu_clk);
+	policy->clk = clk_get(NULL, "armclk");
+	if (IS_ERR(policy->clk))
+		return PTR_ERR(policy->clk);
 
 	dmc0_clk = clk_get(NULL, "sclk_dmc0");
 	if (IS_ERR(dmc0_clk)) {
@@ -551,75 +487,42 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
 	s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000);
 	s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);
 
-	policy->cur = policy->min = policy->max = s5pv210_getspeed(0);
-
-	cpufreq_frequency_table_get_attr(s5pv210_freq_table, policy->cpu);
-
-	policy->cpuinfo.transition_latency = 40000;
-
-	return cpufreq_frequency_table_cpuinfo(policy, s5pv210_freq_table);
+	policy->suspend_freq = SLEEP_FREQ;
+	return cpufreq_generic_init(policy, s5pv210_freq_table, 40000);
 
 out_dmc1:
 	clk_put(dmc0_clk);
 out_dmc0:
-	clk_put(cpu_clk);
+	clk_put(policy->clk);
 	return ret;
 }
 
-static int s5pv210_cpufreq_notifier_event(struct notifier_block *this,
-					  unsigned long event, void *ptr)
-{
-	int ret;
-
-	switch (event) {
-	case PM_SUSPEND_PREPARE:
-		ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
-					    DISABLE_FURTHER_CPUFREQ);
-		if (ret < 0)
-			return NOTIFY_BAD;
-
-		return NOTIFY_OK;
-	case PM_POST_RESTORE:
-	case PM_POST_SUSPEND:
-		cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
-				      ENABLE_FURTHER_CPUFREQ);
-
-		return NOTIFY_OK;
-	}
-
-	return NOTIFY_DONE;
-}
-
 static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
 						 unsigned long event, void *ptr)
 {
 	int ret;
 
-	ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
-				    DISABLE_FURTHER_CPUFREQ);
+	ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
 	if (ret < 0)
 		return NOTIFY_BAD;
 
+	no_cpufreq_access = true;
 	return NOTIFY_DONE;
 }
 
 static struct cpufreq_driver s5pv210_driver = {
-	.flags		= CPUFREQ_STICKY,
-	.verify		= s5pv210_verify_speed,
-	.target		= s5pv210_target,
-	.get		= s5pv210_getspeed,
+	.flags		= CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= s5pv210_target,
+	.get		= cpufreq_generic_get,
 	.init		= s5pv210_cpu_init,
 	.name		= "s5pv210",
 #ifdef CONFIG_PM
-	.suspend	= s5pv210_cpufreq_suspend,
-	.resume		= s5pv210_cpufreq_resume,
+	.suspend	= cpufreq_generic_suspend,
+	.resume		= cpufreq_generic_suspend, /* We need to set SLEEP FREQ again */
 #endif
 };
 
-static struct notifier_block s5pv210_cpufreq_notifier = {
-	.notifier_call = s5pv210_cpufreq_notifier_event,
-};
-
 static struct notifier_block s5pv210_cpufreq_reboot_notifier = {
 	.notifier_call = s5pv210_cpufreq_reboot_notifier_event,
 };
@@ -639,7 +542,6 @@ static int __init s5pv210_cpufreq_init(void)
 		return PTR_ERR(int_regulator);
 	}
 
-	register_pm_notifier(&s5pv210_cpufreq_notifier);
 	register_reboot_notifier(&s5pv210_cpufreq_reboot_notifier);
 
 	return cpufreq_register_driver(&s5pv210_driver);
diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c
index cff18e87ca5..728eab77e8e 100644
--- a/drivers/cpufreq/sa1100-cpufreq.c
+++ b/drivers/cpufreq/sa1100-cpufreq.c
@@ -177,60 +177,33 @@ static void sa1100_update_dram_timings(int current_speed, int new_speed)
 	}
 }
 
-static int sa1100_target(struct cpufreq_policy *policy,
-			 unsigned int target_freq,
-			 unsigned int relation)
+static int sa1100_target(struct cpufreq_policy *policy, unsigned int ppcr)
 {
 	unsigned int cur = sa11x0_getspeed(0);
-	unsigned int new_ppcr;
-	struct cpufreq_freqs freqs;
-
-	new_ppcr = sa11x0_freq_to_ppcr(target_freq);
-	switch (relation) {
-	case CPUFREQ_RELATION_L:
-		if (sa11x0_ppcr_to_freq(new_ppcr) > policy->max)
-			new_ppcr--;
-		break;
-	case CPUFREQ_RELATION_H:
-		if ((sa11x0_ppcr_to_freq(new_ppcr) > target_freq) &&
-		    (sa11x0_ppcr_to_freq(new_ppcr - 1) >= policy->min))
-			new_ppcr--;
-		break;
-	}
-
-	freqs.old = cur;
-	freqs.new = sa11x0_ppcr_to_freq(new_ppcr);
+	unsigned int new_freq;
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+	new_freq = sa11x0_freq_table[ppcr].frequency;
 
-	if (freqs.new > cur)
-		sa1100_update_dram_timings(cur, freqs.new);
+	if (new_freq > cur)
+		sa1100_update_dram_timings(cur, new_freq);
 
-	PPCR = new_ppcr;
+	PPCR = ppcr;
 
-	if (freqs.new < cur)
-		sa1100_update_dram_timings(cur, freqs.new);
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+	if (new_freq < cur)
+		sa1100_update_dram_timings(cur, new_freq);
 
 	return 0;
 }
 
 static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
 {
-	if (policy->cpu != 0)
-		return -EINVAL;
-	policy->cur = policy->min = policy->max = sa11x0_getspeed(0);
-	policy->cpuinfo.min_freq = 59000;
-	policy->cpuinfo.max_freq = 287000;
-	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-	return 0;
+	return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL);
 }
 
 static struct cpufreq_driver sa1100_driver __refdata = {
-	.flags		= CPUFREQ_STICKY,
-	.verify		= sa11x0_verify_speed,
-	.target		= sa1100_target,
+	.flags		= CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= sa1100_target,
 	.get		= sa11x0_getspeed,
 	.init		= sa1100_cpu_init,
 	.name		= "sa1100",
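Note how little is left of sa1100_target(): with .target_index, the core resolves the relation against the table, skips no-op transitions and posts the PRECHANGE/POSTCHANGE notifications itself, so the callback only has to program the hardware for an already-validated index. The general shape of such a callback, reduced to its essentials (reusing the hypothetical foo driver and table from the sketch above):

static int foo_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	/* index is guaranteed to point at a valid foo_freq_table entry */
	return clk_set_rate(policy->clk,
			    foo_freq_table[index].frequency * 1000);
}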
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
index 39c90b6f428..b5befc21117 100644
--- a/drivers/cpufreq/sa1110-cpufreq.c
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -229,36 +229,14 @@ sdram_update_refresh(u_int cpu_khz, struct sdram_params *sdram)
 /*
  * Ok, set the CPU frequency.
  */
-static int sa1110_target(struct cpufreq_policy *policy,
-			 unsigned int target_freq,
-			 unsigned int relation)
+static int sa1110_target(struct cpufreq_policy *policy, unsigned int ppcr)
 {
 	struct sdram_params *sdram = &sdram_params;
-	struct cpufreq_freqs freqs;
 	struct sdram_info sd;
 	unsigned long flags;
-	unsigned int ppcr, unused;
-
-	switch (relation) {
-	case CPUFREQ_RELATION_L:
-		ppcr = sa11x0_freq_to_ppcr(target_freq);
-		if (sa11x0_ppcr_to_freq(ppcr) > policy->max)
-			ppcr--;
-		break;
-	case CPUFREQ_RELATION_H:
-		ppcr = sa11x0_freq_to_ppcr(target_freq);
-		if (ppcr && (sa11x0_ppcr_to_freq(ppcr) > target_freq) &&
-		    (sa11x0_ppcr_to_freq(ppcr-1) >= policy->min))
-			ppcr--;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	freqs.old = sa11x0_getspeed(0);
-	freqs.new = sa11x0_ppcr_to_freq(ppcr);
+	unsigned int unused;
 
-	sdram_calculate_timing(&sd, freqs.new, sdram);
+	sdram_calculate_timing(&sd, sa11x0_freq_table[ppcr].frequency, sdram);
 
 #if 0
 	/*
@@ -277,8 +255,6 @@ static int sa1110_target(struct cpufreq_policy *policy,
 	sd.mdcas[2]   = 0xaaaaaaaa;
 #endif
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
 	/*
 	 * The clock could be going away for some time.  Set the SDRAMs
 	 * to refresh rapidly (every 64 memory clock cycles).  To get
@@ -323,30 +299,22 @@ static int sa1110_target(struct cpufreq_policy *policy,
 	/*
 	 * Now, return the SDRAM refresh back to normal.
 	 */
-	sdram_update_refresh(freqs.new, sdram);
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+	sdram_update_refresh(sa11x0_freq_table[ppcr].frequency, sdram);
 
 	return 0;
 }
 
 static int __init sa1110_cpu_init(struct cpufreq_policy *policy)
 {
-	if (policy->cpu != 0)
-		return -EINVAL;
-	policy->cur = policy->min = policy->max = sa11x0_getspeed(0);
-	policy->cpuinfo.min_freq = 59000;
-	policy->cpuinfo.max_freq = 287000;
-	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-	return 0;
+	return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL);
 }
 
 /* sa1110_driver needs __refdata because it must remain after init registers
  * it with cpufreq_register_driver() */
 static struct cpufreq_driver sa1110_driver __refdata = {
-	.flags		= CPUFREQ_STICKY,
-	.verify		= sa11x0_verify_speed,
-	.target		= sa1110_target,
+	.flags		= CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= sa1110_target,
 	.get		= sa11x0_getspeed,
 	.init		= sa1110_cpu_init,
 	.name		= "sa1110",
@@ -381,7 +349,7 @@ static int __init sa1110_clk_init(void)
 		name = "K4S641632D";
 	if (machine_is_h3100())
 		name = "KM416S4030CT";
-	if (machine_is_jornada720())
+	if (machine_is_jornada720() || machine_is_h3600())
 		name = "K4S281632B-1H";
 	if (machine_is_nanoengine())
 		name = "MT48LC8M16A2TG-75";
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index f740b134d27..ac84e481801 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -33,9 +33,9 @@ static __u8 __iomem *cpuctl;
 #define PFX "sc520_freq: "
 
 static struct cpufreq_frequency_table sc520_freq_table[] = {
-	{0x01,	100000},
-	{0x02,	133000},
-	{0,	CPUFREQ_TABLE_END},
+	{0, 0x01,	100000},
+	{0, 0x02,	133000},
+	{0, 0,	CPUFREQ_TABLE_END},
 };
 
 static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
@@ -53,52 +53,21 @@ static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
 	}
 }
 
-static void sc520_freq_set_cpu_state(struct cpufreq_policy *policy,
-		unsigned int state)
+static int sc520_freq_target(struct cpufreq_policy *policy, unsigned int state)
 {
-	struct cpufreq_freqs freqs;
 	u8 clockspeed_reg;
 
-	freqs.old = sc520_freq_get_cpu_frequency(0);
-	freqs.new = sc520_freq_table[state].frequency;
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
-	pr_debug("attempting to set frequency to %i kHz\n",
-			sc520_freq_table[state].frequency);
-
 	local_irq_disable();
 
 	clockspeed_reg = *cpuctl & ~0x03;
-	*cpuctl = clockspeed_reg | sc520_freq_table[state].index;
+	*cpuctl = clockspeed_reg | sc520_freq_table[state].driver_data;
 
 	local_irq_enable();
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-};
-
-static int sc520_freq_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, &sc520_freq_table[0]);
-}
-
-static int sc520_freq_target(struct cpufreq_policy *policy,
-			    unsigned int target_freq,
-			    unsigned int relation)
-{
-	unsigned int newstate = 0;
-
-	if (cpufreq_frequency_table_target(policy, sc520_freq_table,
-				target_freq, relation, &newstate))
-		return -EINVAL;
-
-	sc520_freq_set_cpu_state(policy, newstate);
-
 	return 0;
 }
 
-
 /*
  *	Module init and exit code
  */
@@ -106,7 +75,6 @@ static int sc520_freq_target(struct cpufreq_policy *policy,
 static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
 {
 	struct cpuinfo_x86 *c = &cpu_data(0);
-	int result;
 
 	/* capability check */
 	if (c->x86_vendor != X86_VENDOR_AMD ||
@@ -115,40 +83,18 @@ static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
 
 	/* cpuinfo and default policy values */
 	policy->cpuinfo.transition_latency = 1000000; /* 1ms */
-	policy->cur = sc520_freq_get_cpu_frequency(0);
-
-	result = cpufreq_frequency_table_cpuinfo(policy, sc520_freq_table);
-	if (result)
-		return result;
-
-	cpufreq_frequency_table_get_attr(sc520_freq_table, policy->cpu);
-
-	return 0;
-}
-
-static int sc520_freq_cpu_exit(struct cpufreq_policy *policy)
-{
-	cpufreq_frequency_table_put_attr(policy->cpu);
-	return 0;
+	return cpufreq_table_validate_and_show(policy, sc520_freq_table);
 }
 
-static struct freq_attr *sc520_freq_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
-
 static struct cpufreq_driver sc520_freq_driver = {
 	.get	= sc520_freq_get_cpu_frequency,
-	.verify	= sc520_freq_verify,
-	.target	= sc520_freq_target,
+	.verify	= cpufreq_generic_frequency_table_verify,
+	.target_index	= sc520_freq_target,
 	.init	= sc520_freq_cpu_init,
-	.exit	= sc520_freq_cpu_exit,
 	.name	= "sc520_freq",
-	.owner	= THIS_MODULE,
-	.attr	= sc520_freq_attr,
+	.attr	= cpufreq_generic_attr,
 };
 
 static const struct x86_cpu_id sc520_ids[] = {
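All of the {0, ...} churn in these tables comes from one structure change: struct cpufreq_frequency_table gained a leading flags member, and the old index field is now called driver_data because the core never interpreted it. Positional initializers therefore grow a leading 0; designated initializers sidestep the issue entirely, as this hypothetical two-state table shows:

#include <linux/cpufreq.h>

/* .driver_data is private to the driver (here: a register value);
 * .frequency is in kHz, .flags is usually left zero */
static struct cpufreq_frequency_table bar_freq_table[] = {
	{ .driver_data = 0x01, .frequency = 100000 },
	{ .driver_data = 0x02, .frequency = 133000 },
	{ .frequency = CPUFREQ_TABLE_END },
};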
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index 73adb64651e..86628e22b2a 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -68,10 +68,10 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
 	freqs.new	= (freq + 500) / 1000;
 	freqs.flags	= 0;
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+	cpufreq_freq_transition_begin(policy, &freqs);
 	set_cpus_allowed_ptr(current, &cpus_allowed);
 	clk_set_rate(cpuclk, freq);
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_freq_transition_end(policy, &freqs, 0);
 
 	dev_dbg(dev, "set frequency %lu Hz\n", freq);
 
@@ -87,15 +87,12 @@ static int sh_cpufreq_verify(struct cpufreq_policy *policy)
 	if (freq_table)
 		return cpufreq_frequency_table_verify(policy, freq_table);
 
-	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-				     policy->cpuinfo.max_freq);
+	cpufreq_verify_within_cpu_limits(policy);
 
 	policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000;
 	policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
 
-	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-				     policy->cpuinfo.max_freq);
-
+	cpufreq_verify_within_cpu_limits(policy);
 	return 0;
 }
 
@@ -114,15 +111,13 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		return PTR_ERR(cpuclk);
 	}
 
-	policy->cur = sh_cpufreq_get(cpu);
-
 	freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
 	if (freq_table) {
 		int result;
 
-		result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
-		if (!result)
-			cpufreq_frequency_table_get_attr(freq_table, cpu);
+		result = cpufreq_table_validate_and_show(policy, freq_table);
+		if (result)
+			return result;
 	} else {
 		dev_notice(dev, "no frequency table found, falling back "
 			   "to rate rounding.\n");
@@ -148,26 +143,19 @@ static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 	unsigned int cpu = policy->cpu;
 	struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
 
-	cpufreq_frequency_table_put_attr(cpu);
 	clk_put(cpuclk);
 
 	return 0;
 }
 
-static struct freq_attr *sh_freq_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver sh_cpufreq_driver = {
-	.owner		= THIS_MODULE,
 	.name		= "sh",
 	.get		= sh_cpufreq_get,
 	.target		= sh_cpufreq_target,
 	.verify		= sh_cpufreq_verify,
 	.init		= sh_cpufreq_cpu_init,
 	.exit		= sh_cpufreq_cpu_exit,
-	.attr		= sh_freq_attr,
+	.attr		= cpufreq_generic_attr,
 };
 
 static int __init sh_cpufreq_module_init(void)
diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c
index 306ae462bba..b73feeb666f 100644
--- a/drivers/cpufreq/sparc-us2e-cpufreq.c
+++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
@@ -245,14 +245,12 @@ static unsigned int us2e_freq_get(unsigned int cpu)
 	return clock_tick / estar_to_divisor(estar);
 }
 
-static void us2e_set_cpu_divider_index(struct cpufreq_policy *policy,
-		unsigned int index)
+static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index)
 {
 	unsigned int cpu = policy->cpu;
 	unsigned long new_bits, new_freq;
 	unsigned long clock_tick, divisor, old_divisor, estar;
 	cpumask_t cpus_allowed;
-	struct cpufreq_freqs freqs;
 
 	cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
 	set_cpus_allowed_ptr(current, cpumask_of(cpu));
@@ -266,41 +264,15 @@ static void us2e_set_cpu_divider_index(struct cpufreq_policy *policy,
 
 	old_divisor = estar_to_divisor(estar);
 
-	freqs.old = clock_tick / old_divisor;
-	freqs.new = new_freq;
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
 	if (old_divisor != divisor)
 		us2e_transition(estar, new_bits, clock_tick * 1000,
 				old_divisor, divisor);
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
 	set_cpus_allowed_ptr(current, &cpus_allowed);
-}
-
-static int us2e_freq_target(struct cpufreq_policy *policy,
-			  unsigned int target_freq,
-			  unsigned int relation)
-{
-	unsigned int new_index = 0;
-
-	if (cpufreq_frequency_table_target(policy,
-					   &us2e_freq_table[policy->cpu].table[0],
-					   target_freq, relation, &new_index))
-		return -EINVAL;
-
-	us2e_set_cpu_divider_index(policy, new_index);
 
 	return 0;
 }
 
-static int us2e_freq_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy,
-					      &us2e_freq_table[policy->cpu].table[0]);
-}
-
 static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
 {
 	unsigned int cpu = policy->cpu;
@@ -308,29 +280,29 @@ static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
 	struct cpufreq_frequency_table *table =
 		&us2e_freq_table[cpu].table[0];
 
-	table[0].index = 0;
+	table[0].driver_data = 0;
 	table[0].frequency = clock_tick / 1;
-	table[1].index = 1;
+	table[1].driver_data = 1;
 	table[1].frequency = clock_tick / 2;
-	table[2].index = 2;
+	table[2].driver_data = 2;
 	table[2].frequency = clock_tick / 4;
-	table[2].index = 3;
+	table[2].driver_data = 3;
 	table[2].frequency = clock_tick / 6;
-	table[2].index = 4;
+	table[2].driver_data = 4;
 	table[2].frequency = clock_tick / 8;
-	table[2].index = 5;
+	table[2].driver_data = 5;
 	table[3].frequency = CPUFREQ_TABLE_END;
 
 	policy->cpuinfo.transition_latency = 0;
 	policy->cur = clock_tick;
 
-	return cpufreq_frequency_table_cpuinfo(policy, table);
+	return cpufreq_table_validate_and_show(policy, table);
 }
 
 static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
 {
 	if (cpufreq_us2e_driver)
-		us2e_set_cpu_divider_index(policy, 0);
+		us2e_freq_target(policy, 0);
 
 	return 0;
 }
@@ -351,22 +323,20 @@ static int __init us2e_freq_init(void)
 		struct cpufreq_driver *driver;
 
 		ret = -ENOMEM;
-		driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
+		driver = kzalloc(sizeof(*driver), GFP_KERNEL);
 		if (!driver)
 			goto err_out;
 
-		us2e_freq_table = kzalloc(
-			(NR_CPUS * sizeof(struct us2e_freq_percpu_info)),
+		us2e_freq_table = kzalloc((NR_CPUS * sizeof(*us2e_freq_table)),
 			GFP_KERNEL);
 		if (!us2e_freq_table)
 			goto err_out;
 
 		driver->init = us2e_freq_cpu_init;
-		driver->verify = us2e_freq_verify;
-		driver->target = us2e_freq_target;
+		driver->verify = cpufreq_generic_frequency_table_verify;
+		driver->target_index = us2e_freq_target;
 		driver->get = us2e_freq_get;
 		driver->exit = us2e_freq_cpu_exit;
-		driver->owner = THIS_MODULE,
 		strcpy(driver->name, "UltraSPARC-IIe");
 
 		cpufreq_us2e_driver = driver;
diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
index c71ee142347..9bb42ba50ef 100644
--- a/drivers/cpufreq/sparc-us3-cpufreq.c
+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
@@ -93,13 +93,11 @@ static unsigned int us3_freq_get(unsigned int cpu)
 	return ret;
 }
 
-static void us3_set_cpu_divider_index(struct cpufreq_policy *policy,
-		unsigned int index)
+static int us3_freq_target(struct cpufreq_policy *policy, unsigned int index)
 {
 	unsigned int cpu = policy->cpu;
 	unsigned long new_bits, new_freq, reg;
 	cpumask_t cpus_allowed;
-	struct cpufreq_freqs freqs;
 
 	cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
 	set_cpus_allowed_ptr(current, cpumask_of(cpu));
@@ -125,43 +123,15 @@ static void us3_set_cpu_divider_index(struct cpufreq_policy *policy,
 
 	reg = read_safari_cfg();
 
-	freqs.old = get_current_freq(cpu, reg);
-	freqs.new = new_freq;
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
 	reg &= ~SAFARI_CFG_DIV_MASK;
 	reg |= new_bits;
 	write_safari_cfg(reg);
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
 	set_cpus_allowed_ptr(current, &cpus_allowed);
-}
-
-static int us3_freq_target(struct cpufreq_policy *policy,
-			  unsigned int target_freq,
-			  unsigned int relation)
-{
-	unsigned int new_index = 0;
-
-	if (cpufreq_frequency_table_target(policy,
-					   &us3_freq_table[policy->cpu].table[0],
-					   target_freq,
-					   relation,
-					   &new_index))
-		return -EINVAL;
-
-	us3_set_cpu_divider_index(policy, new_index);
 
 	return 0;
 }
 
-static int us3_freq_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy,
-					      &us3_freq_table[policy->cpu].table[0]);
-}
-
 static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
 {
 	unsigned int cpu = policy->cpu;
@@ -169,25 +139,25 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
 	struct cpufreq_frequency_table *table =
 		&us3_freq_table[cpu].table[0];
 
-	table[0].index = 0;
+	table[0].driver_data = 0;
 	table[0].frequency = clock_tick / 1;
-	table[1].index = 1;
+	table[1].driver_data = 1;
 	table[1].frequency = clock_tick / 2;
-	table[2].index = 2;
+	table[2].driver_data = 2;
 	table[2].frequency = clock_tick / 32;
-	table[3].index = 0;
+	table[3].driver_data = 0;
 	table[3].frequency = CPUFREQ_TABLE_END;
 
 	policy->cpuinfo.transition_latency = 0;
 	policy->cur = clock_tick;
 
-	return cpufreq_frequency_table_cpuinfo(policy, table);
+	return cpufreq_table_validate_and_show(policy, table);
 }
 
 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
 {
 	if (cpufreq_us3_driver)
-		us3_set_cpu_divider_index(policy, 0);
+		us3_freq_target(policy, 0);
 
 	return 0;
 }
@@ -212,22 +182,20 @@ static int __init us3_freq_init(void)
 		struct cpufreq_driver *driver;
 
 		ret = -ENOMEM;
-		driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
+		driver = kzalloc(sizeof(*driver), GFP_KERNEL);
 		if (!driver)
 			goto err_out;
 
-		us3_freq_table = kzalloc(
-			(NR_CPUS * sizeof(struct us3_freq_percpu_info)),
+		us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
 			GFP_KERNEL);
 		if (!us3_freq_table)
 			goto err_out;
 
 		driver->init = us3_freq_cpu_init;
-		driver->verify = us3_freq_verify;
-		driver->target = us3_freq_target;
+		driver->verify = cpufreq_generic_frequency_table_verify;
+		driver->target_index = us3_freq_target;
 		driver->get = us3_freq_get;
 		driver->exit = us3_freq_cpu_exit;
-		driver->owner = THIS_MODULE,
 		strcpy(driver->name, "UltraSPARC-III");
 
 		cpufreq_us3_driver = driver;
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 156829f4576..38678396636 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -18,7 +18,8 @@
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/types.h>
@@ -30,16 +31,6 @@ static struct {
 	u32 cnt;
 } spear_cpufreq;
 
-static int spear_cpufreq_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, spear_cpufreq.freq_tbl);
-}
-
-static unsigned int spear_cpufreq_get(unsigned int cpu)
-{
-	return clk_get_rate(spear_cpufreq.clk) / 1000;
-}
-
 static struct clk *spear1340_cpu_get_possible_parent(unsigned long newfreq)
 {
 	struct clk *sys_pclk;
@@ -110,20 +101,14 @@ static int spear1340_set_cpu_rate(struct clk *sys_pclk, unsigned long newfreq)
 }
 
 static int spear_cpufreq_target(struct cpufreq_policy *policy,
-		unsigned int target_freq, unsigned int relation)
+		unsigned int index)
 {
-	struct cpufreq_freqs freqs;
-	unsigned long newfreq;
+	long newfreq;
 	struct clk *srcclk;
-	int index, ret, mult = 1;
-
-	if (cpufreq_frequency_table_target(policy, spear_cpufreq.freq_tbl,
-				target_freq, relation, &index))
-		return -EINVAL;
-
-	freqs.old = spear_cpufreq_get(0);
+	int ret, mult = 1;
 
 	newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000;
+
 	if (of_machine_is_compatible("st,spear1340")) {
 		/*
 		 * SPEAr1340 is special in the sense that due to the possibility
@@ -149,73 +134,40 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
 	}
 
 	newfreq = clk_round_rate(srcclk, newfreq * mult);
-	if (newfreq < 0) {
+	if (newfreq <= 0) {
 		pr_err("clk_round_rate failed for cpu src clock\n");
 		return newfreq;
 	}
 
-	freqs.new = newfreq / 1000;
-	freqs.new /= mult;
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
 	if (mult == 2)
 		ret = spear1340_set_cpu_rate(srcclk, newfreq);
 	else
 		ret = clk_set_rate(spear_cpufreq.clk, newfreq);
 
-	/* Get current rate after clk_set_rate, in case of failure */
-	if (ret) {
+	if (ret)
 		pr_err("CPU Freq: cpu clk_set_rate failed: %d\n", ret);
-		freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000;
-	}
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 	return ret;
 }
 
 static int spear_cpufreq_init(struct cpufreq_policy *policy)
 {
-	int ret;
-
-	ret = cpufreq_frequency_table_cpuinfo(policy, spear_cpufreq.freq_tbl);
-	if (ret) {
-		pr_err("cpufreq_frequency_table_cpuinfo() failed");
-		return ret;
-	}
-
-	cpufreq_frequency_table_get_attr(spear_cpufreq.freq_tbl, policy->cpu);
-	policy->cpuinfo.transition_latency = spear_cpufreq.transition_latency;
-	policy->cur = spear_cpufreq_get(0);
-
-	cpumask_setall(policy->cpus);
-
-	return 0;
-}
-
-static int spear_cpufreq_exit(struct cpufreq_policy *policy)
-{
-	cpufreq_frequency_table_put_attr(policy->cpu);
-	return 0;
+	policy->clk = spear_cpufreq.clk;
+	return cpufreq_generic_init(policy, spear_cpufreq.freq_tbl,
+			spear_cpufreq.transition_latency);
 }
 
-static struct freq_attr *spear_cpufreq_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver spear_cpufreq_driver = {
 	.name		= "cpufreq-spear",
-	.flags		= CPUFREQ_STICKY,
-	.verify		= spear_cpufreq_verify,
-	.target		= spear_cpufreq_target,
-	.get		= spear_cpufreq_get,
+	.flags		= CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= spear_cpufreq_target,
+	.get		= cpufreq_generic_get,
 	.init		= spear_cpufreq_init,
-	.exit		= spear_cpufreq_exit,
-	.attr		= spear_cpufreq_attr,
+	.attr		= cpufreq_generic_attr,
 };
 
-static int spear_cpufreq_driver_init(void)
+static int spear_cpufreq_probe(struct platform_device *pdev)
 {
 	struct device_node *np;
 	const struct property *prop;
@@ -223,7 +175,7 @@ static int spear_cpufreq_driver_init(void)
 	const __be32 *val;
 	int cnt, i, ret;
 
-	np = of_find_node_by_path("/cpus/cpu@0");
+	np = of_cpu_device_node_get(0);
 	if (!np) {
 		pr_err("No cpu node found");
 		return -ENODEV;
@@ -243,18 +195,15 @@ static int spear_cpufreq_driver_init(void)
 	cnt = prop->length / sizeof(u32);
 	val = prop->value;
 
-	freq_tbl = kmalloc(sizeof(*freq_tbl) * (cnt + 1), GFP_KERNEL);
+	freq_tbl = kzalloc(sizeof(*freq_tbl) * (cnt + 1), GFP_KERNEL);
 	if (!freq_tbl) {
 		ret = -ENOMEM;
 		goto out_put_node;
 	}
 
-	for (i = 0; i < cnt; i++) {
-		freq_tbl[i].index = i;
+	for (i = 0; i < cnt; i++)
 		freq_tbl[i].frequency = be32_to_cpup(val++);
-	}
 
-	freq_tbl[i].index = i;
 	freq_tbl[i].frequency = CPUFREQ_TABLE_END;
 
 	spear_cpufreq.freq_tbl = freq_tbl;
@@ -283,7 +232,15 @@ out_put_node:
 	of_node_put(np);
 	return ret;
 }
-late_initcall(spear_cpufreq_driver_init);
+
+static struct platform_driver spear_cpufreq_platdrv = {
+	.driver = {
+		.name	= "spear-cpufreq",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= spear_cpufreq_probe,
+};
+module_platform_driver(spear_cpufreq_platdrv);
 
 MODULE_AUTHOR("Deepak Sikri <deepak.sikri@st.com>");
 MODULE_DESCRIPTION("SPEAr CPUFreq driver");
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index 618e6f417b1..7d4a3157160 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -28,7 +28,7 @@
 #include <asm/cpu_device_id.h>
 
 #define PFX		"speedstep-centrino: "
-#define MAINTAINER	"cpufreq@vger.kernel.org"
+#define MAINTAINER	"linux-pm@vger.kernel.org"
 
 #define INTEL_MSR_RANGE	(0xffff)
 
@@ -79,11 +79,11 @@ static struct cpufreq_driver centrino_driver;
 
 /* Computes the correct form for IA32_PERF_CTL MSR for a particular
    frequency/voltage operating point; frequency in MHz, volts in mV.
-   This is stored as "index" in the structure. */
+   This is stored as "driver_data" in the structure. */
 #define OP(mhz, mv)							\
 	{								\
 		.frequency = (mhz) * 1000,				\
-		.index = (((mhz)/100) << 8) | ((mv - 700) / 16)		\
+		.driver_data = (((mhz)/100) << 8) | ((mv - 700) / 16)	\
 	}
 
 /*
@@ -307,7 +307,7 @@ static unsigned extract_clock(unsigned msr, unsigned int cpu, int failsafe)
 	     per_cpu(centrino_model, cpu)->op_points[i].frequency
 	     != CPUFREQ_TABLE_END;
 	     i++) {
-		if (msr == per_cpu(centrino_model, cpu)->op_points[i].index)
+		if (msr == per_cpu(centrino_model, cpu)->op_points[i].driver_data)
 			return per_cpu(centrino_model, cpu)->
 							op_points[i].frequency;
 	}
@@ -343,9 +343,7 @@ static unsigned int get_cur_freq(unsigned int cpu)
 static int centrino_cpu_init(struct cpufreq_policy *policy)
 {
 	struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
-	unsigned freq;
 	unsigned l, h;
-	int ret;
 	int i;
 
 	/* Only Intel makes Enhanced Speedstep-capable CPUs */
@@ -373,9 +371,8 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
 			return -ENODEV;
 	}
 
-	if (centrino_cpu_init_table(policy)) {
+	if (centrino_cpu_init_table(policy))
 		return -ENODEV;
-	}
 
 	/* Check to see if Enhanced SpeedStep is enabled, and try to
 	   enable it if not. */
@@ -395,22 +392,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
 		}
 	}
 
-	freq = get_cur_freq(policy->cpu);
 	policy->cpuinfo.transition_latency = 10000;
 						/* 10uS transition latency */
-	policy->cur = freq;
-
-	pr_debug("centrino_cpu_init: cur=%dkHz\n", policy->cur);
 
-	ret = cpufreq_frequency_table_cpuinfo(policy,
+	return cpufreq_table_validate_and_show(policy,
 		per_cpu(centrino_model, policy->cpu)->op_points);
-	if (ret)
-		return (ret);
-
-	cpufreq_frequency_table_get_attr(
-		per_cpu(centrino_model, policy->cpu)->op_points, policy->cpu);
-
-	return 0;
 }
 
 static int centrino_cpu_exit(struct cpufreq_policy *policy)
@@ -420,44 +406,24 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
 	if (!per_cpu(centrino_model, cpu))
 		return -ENODEV;
 
-	cpufreq_frequency_table_put_attr(cpu);
-
 	per_cpu(centrino_model, cpu) = NULL;
 
 	return 0;
 }
 
 /**
- * centrino_verify - verifies a new CPUFreq policy
- * @policy: new policy
- *
- * Limit must be within this model's frequency range at least one
- * border included.
- */
-static int centrino_verify (struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy,
-			per_cpu(centrino_model, policy->cpu)->op_points);
-}
-
-/**
  * centrino_setpolicy - set a new CPUFreq policy
  * @policy: new policy
- * @target_freq: the target frequency
- * @relation: how that frequency relates to achieved frequency
- *	(CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
+ * @index: index of target frequency
  *
  * Sets a new CPUFreq policy.
 */
-static int centrino_target (struct cpufreq_policy *policy,
-			    unsigned int target_freq,
-			    unsigned int relation)
+static int centrino_target(struct cpufreq_policy *policy, unsigned int index)
 {
-	unsigned int    newstate = 0;
 	unsigned int	msr, oldmsr = 0, h = 0, cpu = policy->cpu;
-	struct cpufreq_freqs	freqs;
 	int			retval = 0;
-	unsigned int		j, first_cpu, tmp;
+	unsigned int		j, first_cpu;
+	struct cpufreq_frequency_table *op_points;
 	cpumask_var_t covered_cpus;
 
 	if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)))
@@ -468,16 +434,8 @@ static int centrino_target (struct cpufreq_policy *policy,
 		goto out;
 	}
 
-	if (unlikely(cpufreq_frequency_table_target(policy,
-			per_cpu(centrino_model, cpu)->op_points,
-			target_freq,
-			relation,
-			&newstate))) {
-		retval = -EINVAL;
-		goto out;
-	}
-
 	first_cpu = 1;
+	op_points = &per_cpu(centrino_model, cpu)->op_points[index];
 	for_each_cpu(j, policy->cpus) {
 		int good_cpu;
 
@@ -501,7 +459,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 			break;
 		}
 
-		msr = per_cpu(centrino_model, cpu)->op_points[newstate].index;
+		msr = op_points->driver_data;
 
 		if (first_cpu) {
 			rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h);
@@ -512,15 +470,6 @@ static int centrino_target (struct cpufreq_policy *policy,
 				goto out;
 			}
 
-			freqs.old = extract_clock(oldmsr, cpu, 0);
-			freqs.new = extract_clock(msr, cpu, 0);
-
-			pr_debug("target=%dkHz old=%d new=%d msr=%04x\n",
-				 target_freq, freqs.old, freqs.new, msr);
-
-			cpufreq_notify_transition(policy, &freqs,
-					CPUFREQ_PRECHANGE);
-
 			first_cpu = 0;
 			/* all but 16 LSB are reserved, treat them with care */
 			oldmsr &= ~0xffff;
@@ -535,8 +484,6 @@ static int centrino_target (struct cpufreq_policy *policy,
 		cpumask_set_cpu(j, covered_cpus);
 	}
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
 	if (unlikely(retval)) {
 		/*
 		 * We have failed halfway through the frequency change.
@@ -547,12 +494,6 @@ static int centrino_target (struct cpufreq_policy *policy,
 
 		for_each_cpu(j, covered_cpus)
 			wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr, h);
-
-		tmp = freqs.new;
-		freqs.new = freqs.old;
-		freqs.old = tmp;
-		cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-		cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 	}
 
 	retval = 0;
@@ -561,21 +502,15 @@ out:
 	return retval;
 }
 
-static struct freq_attr* centrino_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver centrino_driver = {
 	.name		= "centrino", /* should be speedstep-centrino,
 					 but there's a 16 char limit */
 	.init		= centrino_cpu_init,
 	.exit		= centrino_cpu_exit,
-	.verify		= centrino_verify,
-	.target		= centrino_target,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= centrino_target,
 	.get		= get_cur_freq,
-	.attr           = centrino_attr,
-	.owner		= THIS_MODULE,
+	.attr		= cpufreq_generic_attr,
 };
 
 /*
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index e2e5aa97145..1a07b5904ed 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -49,9 +49,9 @@ static u32 pmbase;
  * are in kHz for the time being.
  */
 static struct cpufreq_frequency_table speedstep_freqs[] = {
-	{SPEEDSTEP_HIGH,	0},
-	{SPEEDSTEP_LOW,		0},
-	{0,			CPUFREQ_TABLE_END},
+	{0, SPEEDSTEP_HIGH,	0},
+	{0, SPEEDSTEP_LOW,	0},
+	{0, 0,			CPUFREQ_TABLE_END},
 };
 
 
@@ -251,56 +251,23 @@ static unsigned int speedstep_get(unsigned int cpu)
 /**
  * speedstep_target - set a new CPUFreq policy
  * @policy: new policy
- * @target_freq: the target frequency
- * @relation: how that frequency relates to achieved frequency
- *	(CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
+ * @index: index of target frequency
  *
  * Sets a new CPUFreq policy.
  */
-static int speedstep_target(struct cpufreq_policy *policy,
-			     unsigned int target_freq,
-			     unsigned int relation)
+static int speedstep_target(struct cpufreq_policy *policy, unsigned int index)
 {
-	unsigned int newstate = 0, policy_cpu;
-	struct cpufreq_freqs freqs;
-
-	if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0],
-				target_freq, relation, &newstate))
-		return -EINVAL;
+	unsigned int policy_cpu;
 
 	policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);
-	freqs.old = speedstep_get(policy_cpu);
-	freqs.new = speedstep_freqs[newstate].frequency;
-
-	pr_debug("transiting from %u to %u kHz\n", freqs.old, freqs.new);
-
-	/* no transition necessary */
-	if (freqs.old == freqs.new)
-		return 0;
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
-	smp_call_function_single(policy_cpu, _speedstep_set_state, &newstate,
+	smp_call_function_single(policy_cpu, _speedstep_set_state, &index,
 				 true);
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
 	return 0;
 }
 
-/**
- * speedstep_verify - verifies a new CPUFreq policy
- * @policy: new policy
- *
- * Limit must be within speedstep_low_freq and speedstep_high_freq, with
- * at least one border included.
- */
-static int speedstep_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
-}
-
 struct get_freqs {
 	struct cpufreq_policy *policy;
 	int ret;
@@ -320,8 +287,7 @@ static void get_freqs_on_cpu(void *_get_freqs)
 
 static int speedstep_cpu_init(struct cpufreq_policy *policy)
 {
-	int result;
-	unsigned int policy_cpu, speed;
+	unsigned int policy_cpu;
 	struct get_freqs gf;
 
 	/* only run on CPU to be set, or on its sibling */
@@ -336,50 +302,17 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
 	if (gf.ret)
 		return gf.ret;
 
-	/* get current speed setting */
-	speed = speedstep_get(policy_cpu);
-	if (!speed)
-		return -EIO;
-
-	pr_debug("currently at %s speed setting - %i MHz\n",
-		(speed == speedstep_freqs[SPEEDSTEP_LOW].frequency)
-		? "low" : "high",
-		(speed / 1000));
-
-	/* cpuinfo and default policy values */
-	policy->cur = speed;
-
-	result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
-	if (result)
-		return result;
-
-	cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
-
-	return 0;
+	return cpufreq_table_validate_and_show(policy, speedstep_freqs);
 }
 
-static int speedstep_cpu_exit(struct cpufreq_policy *policy)
-{
-	cpufreq_frequency_table_put_attr(policy->cpu);
-	return 0;
-}
-
-static struct freq_attr *speedstep_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
-
 static struct cpufreq_driver speedstep_driver = {
 	.name	= "speedstep-ich",
-	.verify	= speedstep_verify,
-	.target	= speedstep_target,
+	.verify	= cpufreq_generic_frequency_table_verify,
+	.target_index	= speedstep_target,
 	.init	= speedstep_cpu_init,
-	.exit	= speedstep_cpu_exit,
 	.get	= speedstep_get,
-	.owner	= THIS_MODULE,
-	.attr	= speedstep_attr,
+	.attr	= cpufreq_generic_attr,
 };
 
 static const struct x86_cpu_id ss_smi_ids[] = {
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index f5a6b70ee6c..8635eec96da 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -42,9 +42,9 @@ static enum speedstep_processor speedstep_processor;
  * are in kHz for the time being.
  */
 static struct cpufreq_frequency_table speedstep_freqs[] = {
-	{SPEEDSTEP_HIGH,	0},
-	{SPEEDSTEP_LOW,		0},
-	{0,			CPUFREQ_TABLE_END},
+	{0, SPEEDSTEP_HIGH,	0},
+	{0, SPEEDSTEP_LOW,	0},
+	{0, 0,			CPUFREQ_TABLE_END},
 };
 
 #define GET_SPEEDSTEP_OWNER 0
@@ -141,38 +141,6 @@ static int speedstep_smi_get_freqs(unsigned int *low, unsigned int *high)
 }
 
 /**
- * speedstep_get_state - set the SpeedStep state
- * @state: processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
- *
- */
-static int speedstep_get_state(void)
-{
-	u32 function = GET_SPEEDSTEP_STATE;
-	u32 result, state, edi, command, dummy;
-
-	command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
-
-	pr_debug("trying to determine current setting with command %x "
-		"at port %x\n", command, smi_port);
-
-	__asm__ __volatile__(
-		"push %%ebp\n"
-		"out %%al, (%%dx)\n"
-		"pop %%ebp\n"
-		: "=a" (result),
-		  "=b" (state), "=D" (edi),
-		  "=c" (dummy), "=d" (dummy), "=S" (dummy)
-		: "a" (command), "b" (function), "c" (0),
-		  "d" (smi_port), "S" (0), "D" (0)
-	);
-
-	pr_debug("state is %x, result is %x\n", state, result);
-
-	return state & 1;
-}
-
-
-/**
  * speedstep_set_state - set the SpeedStep state
  * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
  *
@@ -235,52 +203,21 @@ static void speedstep_set_state(unsigned int state)
 /**
  * speedstep_target - set a new CPUFreq policy
  * @policy: new policy
- * @target_freq: new freq
- * @relation:
+ * @index: index of new freq
  *
  * Sets a new CPUFreq policy/freq.
  */
-static int speedstep_target(struct cpufreq_policy *policy,
-			unsigned int target_freq, unsigned int relation)
+static int speedstep_target(struct cpufreq_policy *policy, unsigned int index)
 {
-	unsigned int newstate = 0;
-	struct cpufreq_freqs freqs;
-
-	if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0],
-				target_freq, relation, &newstate))
-		return -EINVAL;
-
-	freqs.old = speedstep_freqs[speedstep_get_state()].frequency;
-	freqs.new = speedstep_freqs[newstate].frequency;
-
-	if (freqs.old == freqs.new)
-		return 0;
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-	speedstep_set_state(newstate);
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+	speedstep_set_state(index);
 
 	return 0;
 }
 
-/**
- * speedstep_verify - verifies a new CPUFreq policy
- * @policy: new policy
- *
- * Limit must be within speedstep_low_freq and speedstep_high_freq, with
- * at least one border included.
- */
-static int speedstep_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
-}
-
-
 static int speedstep_cpu_init(struct cpufreq_policy *policy)
 {
 	int result;
-	unsigned int speed, state;
 	unsigned int *low, *high;
 
 	/* capability check */
@@ -316,32 +253,8 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
 		pr_debug("workaround worked.\n");
 	}
 
-	/* get current speed setting */
-	state = speedstep_get_state();
-	speed = speedstep_freqs[state].frequency;
-
-	pr_debug("currently at %s speed setting - %i MHz\n",
-		(speed == speedstep_freqs[SPEEDSTEP_LOW].frequency)
-		? "low" : "high",
-		(speed / 1000));
-
-	/* cpuinfo and default policy values */
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-	policy->cur = speed;
-
-	result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
-	if (result)
-		return result;
-
-	cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
-
-	return 0;
-}
-
-static int speedstep_cpu_exit(struct cpufreq_policy *policy)
-{
-	cpufreq_frequency_table_put_attr(policy->cpu);
-	return 0;
+	return cpufreq_table_validate_and_show(policy, speedstep_freqs);
 }
 
 static unsigned int speedstep_get(unsigned int cpu)
@@ -362,21 +275,14 @@ static int speedstep_resume(struct cpufreq_policy *policy)
 	return result;
 }
 
-static struct freq_attr *speedstep_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver speedstep_driver = {
 	.name		= "speedstep-smi",
-	.verify		= speedstep_verify,
-	.target		= speedstep_target,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= speedstep_target,
 	.init		= speedstep_cpu_init,
-	.exit		= speedstep_cpu_exit,
 	.get		= speedstep_get,
 	.resume		= speedstep_resume,
-	.owner		= THIS_MODULE,
-	.attr		= speedstep_attr,
+	.attr		= cpufreq_generic_attr,
 };
 
 static const struct x86_cpu_id ss_smi_ids[] = {
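Every driver in this series also drops the same three-line freq_attr array. cpufreq_generic_attr is simply the shared copy of that array exported by the frequency-table helpers, i.e. the equivalent of what each driver used to declare by hand:

/* what .attr = cpufreq_generic_attr stands in for (foo is hypothetical) */
static struct freq_attr *foo_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};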
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c
index c74c0e130ef..8084c7f7e20 100644
--- a/drivers/cpufreq/tegra-cpufreq.c
+++ b/drivers/cpufreq/tegra-cpufreq.c
@@ -26,19 +26,17 @@
 #include <linux/err.h>
 #include <linux/clk.h>
 #include <linux/io.h>
-#include <linux/suspend.h>
 
-/* Frequency table index must be sequential starting at 0 */
 static struct cpufreq_frequency_table freq_table[] = {
-	{ 0, 216000 },
-	{ 1, 312000 },
-	{ 2, 456000 },
-	{ 3, 608000 },
-	{ 4, 760000 },
-	{ 5, 816000 },
-	{ 6, 912000 },
-	{ 7, 1000000 },
-	{ 8, CPUFREQ_TABLE_END },
+	{ .frequency = 216000 },
+	{ .frequency = 312000 },
+	{ .frequency = 456000 },
+	{ .frequency = 608000 },
+	{ .frequency = 760000 },
+	{ .frequency = 816000 },
+	{ .frequency = 912000 },
+	{ .frequency = 1000000 },
+	{ .frequency = CPUFREQ_TABLE_END },
 };
 
 #define NUM_CPUS	2
@@ -47,74 +45,55 @@ static struct clk *cpu_clk;
 static struct clk *pll_x_clk;
 static struct clk *pll_p_clk;
 static struct clk *emc_clk;
+static bool pll_x_prepared;
 
-static unsigned long target_cpu_speed[NUM_CPUS];
-static DEFINE_MUTEX(tegra_cpu_lock);
-static bool is_suspended;
-
-static int tegra_verify_speed(struct cpufreq_policy *policy)
+static unsigned int tegra_get_intermediate(struct cpufreq_policy *policy,
+					   unsigned int index)
 {
-	return cpufreq_frequency_table_verify(policy, freq_table);
-}
+	unsigned int ifreq = clk_get_rate(pll_p_clk) / 1000;
 
-static unsigned int tegra_getspeed(unsigned int cpu)
-{
-	unsigned long rate;
-
-	if (cpu >= NUM_CPUS)
+	/*
+	 * Don't switch to intermediate freq if:
+	 * - we are already at it, i.e. policy->cur == ifreq
+	 * - index corresponds to ifreq
+	 */
+	if ((freq_table[index].frequency == ifreq) || (policy->cur == ifreq))
 		return 0;
 
-	rate = clk_get_rate(cpu_clk) / 1000;
-	return rate;
+	return ifreq;
 }
 
-static int tegra_cpu_clk_set_rate(unsigned long rate)
+static int tegra_target_intermediate(struct cpufreq_policy *policy,
+				     unsigned int index)
 {
 	int ret;
 
 	/*
 	 * Take an extra reference to the main pll so it doesn't turn
-	 * off when we move the cpu off of it
+	 * off when we move the cpu off of it as enabling it again while we
+	 * switch to it from tegra_target() would take additional time.
+	 *
+	 * When target-freq is equal to intermediate freq we don't need to
+	 * switch to an intermediate freq and so this routine isn't called.
+	 * Also, we wouldn't be using pll_x anymore and must not take extra
+	 * reference to it, as it can be disabled now to save some power.
+	 */
 	clk_prepare_enable(pll_x_clk);
 
 	ret = clk_set_parent(cpu_clk, pll_p_clk);
-	if (ret) {
-		pr_err("Failed to switch cpu to clock pll_p\n");
-		goto out;
-	}
-
-	if (rate == clk_get_rate(pll_p_clk))
-		goto out;
-
-	ret = clk_set_rate(pll_x_clk, rate);
-	if (ret) {
-		pr_err("Failed to change pll_x to %lu\n", rate);
-		goto out;
-	}
-
-	ret = clk_set_parent(cpu_clk, pll_x_clk);
-	if (ret) {
-		pr_err("Failed to switch cpu to clock pll_x\n");
-		goto out;
-	}
+	if (ret)
+		clk_disable_unprepare(pll_x_clk);
+	else
+		pll_x_prepared = true;
 
-out:
-	clk_disable_unprepare(pll_x_clk);
 	return ret;
 }
 
-static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
-		unsigned long rate)
+static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
 {
+	unsigned long rate = freq_table[index].frequency;
+	unsigned int ifreq = clk_get_rate(pll_p_clk) / 1000;
 	int ret = 0;
-	struct cpufreq_freqs freqs;
-
-	freqs.old = tegra_getspeed(0);
-	freqs.new = rate;
-
-	if (freqs.old == freqs.new)
-		return ret;
 
 	/*
 	 * Vote on memory bus frequency based on cpu frequency
@@ -127,136 +106,83 @@ static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
 	else
 		clk_set_rate(emc_clk, 100000000);  /* emc 50Mhz */
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
-#ifdef CONFIG_CPU_FREQ_DEBUG
-	printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n",
-	       freqs.old, freqs.new);
-#endif
-
-	ret = tegra_cpu_clk_set_rate(freqs.new * 1000);
-	if (ret) {
-		pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n",
-			freqs.new);
-		return ret;
-	}
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
-	return 0;
-}
-
-static unsigned long tegra_cpu_highest_speed(void)
-{
-	unsigned long rate = 0;
-	int i;
-
-	for_each_online_cpu(i)
-		rate = max(rate, target_cpu_speed[i]);
-	return rate;
-}
+	/*
+	 * target freq == pll_p, don't need to take extra reference to pll_x_clk
+	 * as it isn't used anymore.
+	 */
+	if (rate == ifreq)
+		return clk_set_parent(cpu_clk, pll_p_clk);
 
-static int tegra_target(struct cpufreq_policy *policy,
-		       unsigned int target_freq,
-		       unsigned int relation)
-{
-	unsigned int idx;
-	unsigned int freq;
-	int ret = 0;
+	ret = clk_set_rate(pll_x_clk, rate * 1000);
+	/* Restore to earlier frequency on error, i.e. pll_x */
+	if (ret)
+		pr_err("Failed to change pll_x to %lu\n", rate);
 
-	mutex_lock(&tegra_cpu_lock);
+	ret = clk_set_parent(cpu_clk, pll_x_clk);
+	/* This shouldn't fail while changing or restoring */
+	WARN_ON(ret);
 
-	if (is_suspended) {
-		ret = -EBUSY;
-		goto out;
+	/*
+	 * Drop count to pll_x clock only if we switched to intermediate freq
+	 * earlier while transitioning to a target frequency.
+	 */
+	if (pll_x_prepared) {
+		clk_disable_unprepare(pll_x_clk);
+		pll_x_prepared = false;
 	}
 
-	cpufreq_frequency_table_target(policy, freq_table, target_freq,
-		relation, &idx);
-
-	freq = freq_table[idx].frequency;
-
-	target_cpu_speed[policy->cpu] = freq;
-
-	ret = tegra_update_cpu_speed(policy, tegra_cpu_highest_speed());
-
-out:
-	mutex_unlock(&tegra_cpu_lock);
 	return ret;
 }
 
-static int tegra_pm_notify(struct notifier_block *nb, unsigned long event,
-	void *dummy)
-{
-	mutex_lock(&tegra_cpu_lock);
-	if (event == PM_SUSPEND_PREPARE) {
-		struct cpufreq_policy *policy = cpufreq_cpu_get(0);
-		is_suspended = true;
-		pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n",
-			freq_table[0].frequency);
-		tegra_update_cpu_speed(policy, freq_table[0].frequency);
-		cpufreq_cpu_put(policy);
-	} else if (event == PM_POST_SUSPEND) {
-		is_suspended = false;
-	}
-	mutex_unlock(&tegra_cpu_lock);
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block tegra_cpu_pm_notifier = {
-	.notifier_call = tegra_pm_notify,
-};
-
 static int tegra_cpu_init(struct cpufreq_policy *policy)
 {
+	int ret;
+
 	if (policy->cpu >= NUM_CPUS)
 		return -EINVAL;
 
 	clk_prepare_enable(emc_clk);
 	clk_prepare_enable(cpu_clk);
 
-	cpufreq_frequency_table_cpuinfo(policy, freq_table);
-	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-	policy->cur = tegra_getspeed(policy->cpu);
-	target_cpu_speed[policy->cpu] = policy->cur;
-
 	/* FIXME: what's the actual transition time? */
-	policy->cpuinfo.transition_latency = 300 * 1000;
-
-	cpumask_copy(policy->cpus, cpu_possible_mask);
-
-	if (policy->cpu == 0)
-		register_pm_notifier(&tegra_cpu_pm_notifier);
+	ret = cpufreq_generic_init(policy, freq_table, 300 * 1000);
+	if (ret) {
+		clk_disable_unprepare(cpu_clk);
+		clk_disable_unprepare(emc_clk);
+		return ret;
+	}
 
+	policy->clk = cpu_clk;
+	policy->suspend_freq = freq_table[0].frequency;
 	return 0;
 }
 
 static int tegra_cpu_exit(struct cpufreq_policy *policy)
 {
-	cpufreq_frequency_table_cpuinfo(policy, freq_table);
+	clk_disable_unprepare(cpu_clk);
 	clk_disable_unprepare(emc_clk);
 	return 0;
 }
 
-static struct freq_attr *tegra_cpufreq_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver tegra_cpufreq_driver = {
-	.verify		= tegra_verify_speed,
-	.target		= tegra_target,
-	.get		= tegra_getspeed,
-	.init		= tegra_cpu_init,
-	.exit		= tegra_cpu_exit,
-	.name		= "tegra",
-	.attr		= tegra_cpufreq_attr,
+	.flags			= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+	.verify			= cpufreq_generic_frequency_table_verify,
+	.get_intermediate	= tegra_get_intermediate,
+	.target_intermediate	= tegra_target_intermediate,
+	.target_index		= tegra_target,
+	.get			= cpufreq_generic_get,
+	.init			= tegra_cpu_init,
+	.exit			= tegra_cpu_exit,
+	.name			= "tegra",
+	.attr			= cpufreq_generic_attr,
+#ifdef CONFIG_PM
+	.suspend		= cpufreq_generic_suspend,
+#endif
 };
 
 static int __init tegra_cpufreq_init(void)
 {
-	cpu_clk = clk_get_sys(NULL, "cpu");
+	cpu_clk = clk_get_sys(NULL, "cclk");
 	if (IS_ERR(cpu_clk))
 		return PTR_ERR(cpu_clk);
 
@@ -264,7 +190,7 @@ static int __init tegra_cpufreq_init(void)
 	if (IS_ERR(pll_x_clk))
 		return PTR_ERR(pll_x_clk);
 
-	pll_p_clk = clk_get_sys(NULL, "pll_p_cclk");
+	pll_p_clk = clk_get_sys(NULL, "pll_p");
 	if (IS_ERR(pll_p_clk))
 		return PTR_ERR(pll_p_clk);
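The tegra conversion above is the showcase user of the intermediate-frequency hooks: when get_intermediate() returns a non-zero frequency, the core calls target_intermediate() to park the CPU there before invoking target_index(), and treats the whole sequence as a single transition as far as notifiers are concerned. Stripped of the tegra clock details, the contract looks roughly like this — the foo names and the stable parent clock are hypothetical, and foo_freq_table is the table from the earlier sketch:

static struct clk *foo_safe_parent;	/* e.g. a fixed-rate backup PLL */

static unsigned int foo_get_intermediate(struct cpufreq_policy *policy,
					 unsigned int index)
{
	unsigned int ifreq = clk_get_rate(foo_safe_parent) / 1000;

	/* returning 0 tells the core no intermediate step is needed */
	if (policy->cur == ifreq || foo_freq_table[index].frequency == ifreq)
		return 0;

	return ifreq;
}

static int foo_target_intermediate(struct cpufreq_policy *policy,
				   unsigned int index)
{
	/* reparent the CPU onto the stable clock while the main PLL relocks */
	return clk_set_parent(policy->clk, foo_safe_parent);
}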
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c
index 12fc904d7da..6f9dfa80563 100644
--- a/drivers/cpufreq/unicore2-cpufreq.c
+++ b/drivers/cpufreq/unicore2-cpufreq.c
@@ -11,6 +11,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/init.h>
@@ -24,62 +25,49 @@ static struct cpufreq_driver ucv2_driver;
 /* make sure that only the "userspace" governor is run
  * -- anything else wouldn't make sense on this platform, anyway.
  */
-int ucv2_verify_speed(struct cpufreq_policy *policy)
+static int ucv2_verify_speed(struct cpufreq_policy *policy)
 {
 	if (policy->cpu)
 		return -EINVAL;
 
-	cpufreq_verify_within_limits(policy,
-			policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
-
+	cpufreq_verify_within_cpu_limits(policy);
 	return 0;
 }
 
-static unsigned int ucv2_getspeed(unsigned int cpu)
-{
-	struct clk *mclk = clk_get(NULL, "MAIN_CLK");
-
-	if (cpu)
-		return 0;
-	return clk_get_rate(mclk)/1000;
-}
-
 static int ucv2_target(struct cpufreq_policy *policy,
 			 unsigned int target_freq,
 			 unsigned int relation)
 {
-	unsigned int cur = ucv2_getspeed(0);
 	struct cpufreq_freqs freqs;
-	struct clk *mclk = clk_get(NULL, "MAIN_CLK");
+	int ret;
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+	freqs.old = policy->cur;
+	freqs.new = target_freq;
 
-	if (!clk_set_rate(mclk, target_freq * 1000)) {
-		freqs.old = cur;
-		freqs.new = target_freq;
-	}
+	cpufreq_freq_transition_begin(policy, &freqs);
+	ret = clk_set_rate(policy->clk, target_freq * 1000);
+	cpufreq_freq_transition_end(policy, &freqs, ret);
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
-	return 0;
+	return ret;
 }
 
 static int __init ucv2_cpu_init(struct cpufreq_policy *policy)
 {
 	if (policy->cpu != 0)
 		return -EINVAL;
-	policy->cur = ucv2_getspeed(0);
+
 	policy->min = policy->cpuinfo.min_freq = 250000;
 	policy->max = policy->cpuinfo.max_freq = 1000000;
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-	return 0;
+	policy->clk = clk_get(NULL, "MAIN_CLK");
+	return PTR_ERR_OR_ZERO(policy->clk);
 }
 
 static struct cpufreq_driver ucv2_driver = {
 	.flags		= CPUFREQ_STICKY,
 	.verify		= ucv2_verify_speed,
 	.target		= ucv2_target,
-	.get		= ucv2_getspeed,
+	.get		= cpufreq_generic_get,
 	.init		= ucv2_cpu_init,
 	.name		= "UniCore-II",
 };
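The unicore2 conversion leans on two newer core facilities: the driver stores its clock in policy->clk, which lets the generic cpufreq_generic_get() report the current rate, and it brackets the rate change with cpufreq_freq_transition_begin()/cpufreq_freq_transition_end(), which serialise concurrent transitions and issue the PRECHANGE/POSTCHANGE notifications on the driver's behalf. A simplified sketch of what the generic getter does with policy->clk; this illustrates the contract and is not the exact helper from drivers/cpufreq/cpufreq.c:

#include <linux/clk.h>
#include <linux/cpufreq.h>

static unsigned int sketch_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int freq = 0;

	if (policy) {
		if (policy->clk)
			freq = clk_get_rate(policy->clk) / 1000; /* Hz -> kHz */
		cpufreq_cpu_put(policy);	/* drop the reference taken above */
	}

	return freq;
}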
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
new file mode 100644
index 00000000000..7f7c9c01b44
--- /dev/null
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -0,0 +1,70 @@
+/*
+ * Versatile Express SPC CPUFreq Interface driver
+ *
+ * It provides necessary ops to arm_big_little cpufreq driver.
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/types.h>
+
+#include "arm_big_little.h"
+
+static int ve_spc_init_opp_table(struct device *cpu_dev)
+{
+	/*
+	 * platform specific SPC code must initialise the opp table
+	 * so just check if the OPP count is non-zero
+	 */
+	return dev_pm_opp_get_opp_count(cpu_dev) <= 0;
+}
+
+static int ve_spc_get_transition_latency(struct device *cpu_dev)
+{
+	return 1000000; /* 1 ms */
+}
+
+static struct cpufreq_arm_bL_ops ve_spc_cpufreq_ops = {
+	.name			= "vexpress-spc",
+	.get_transition_latency	= ve_spc_get_transition_latency,
+	.init_opp_table		= ve_spc_init_opp_table,
+};
+
+static int ve_spc_cpufreq_probe(struct platform_device *pdev)
+{
+	return bL_cpufreq_register(&ve_spc_cpufreq_ops);
+}
+
+static int ve_spc_cpufreq_remove(struct platform_device *pdev)
+{
+	bL_cpufreq_unregister(&ve_spc_cpufreq_ops);
+	return 0;
+}
+
+static struct platform_driver ve_spc_cpufreq_platdrv = {
+	.driver = {
+		.name	= "vexpress-spc-cpufreq",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= ve_spc_cpufreq_probe,
+	.remove		= ve_spc_cpufreq_remove,
+};
+module_platform_driver(ve_spc_cpufreq_platdrv);
+
+MODULE_LICENSE("GPL");
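The new Versatile Express glue shows how small an arm_big_little back-end can be: it names itself, reports a transition latency, and confirms that an OPP table was already populated (here by the platform's SPC code), leaving all clock and policy handling to the common layer. A hedged sketch of the same pattern for a hypothetical platform; every foo_* name and the 500 us latency figure are invented for illustration:

#include <linux/device.h>
#include <linux/module.h>
#include <linux/pm_opp.h>

#include "arm_big_little.h"

/* Refuse to register if firmware left the OPP table empty. */
static int foo_init_opp_table(struct device *cpu_dev)
{
	return dev_pm_opp_get_opp_count(cpu_dev) <= 0 ? -ENODEV : 0;
}

static int foo_get_transition_latency(struct device *cpu_dev)
{
	return 500000;	/* in ns, i.e. 500 us; a made-up figure */
}

static struct cpufreq_arm_bL_ops foo_bL_ops = {
	.name			= "foo-bL",
	.get_transition_latency	= foo_get_transition_latency,
	.init_opp_table		= foo_init_opp_table,
};

static int __init foo_bL_init(void)
{
	return bL_cpufreq_register(&foo_bL_ops);
}
module_init(foo_bL_init);

static void __exit foo_bL_exit(void)
{
	bL_cpufreq_unregister(&foo_bL_ops);
}
module_exit(foo_bL_exit);

MODULE_LICENSE("GPL");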
