Diffstat (limited to 'arch/mips/netlogic/xlr')
-rw-r--r--arch/mips/netlogic/xlr/platform.c4
-rw-r--r--arch/mips/netlogic/xlr/setup.c21
-rw-r--r--arch/mips/netlogic/xlr/wakeup.c3
3 files changed, 3 insertions, 25 deletions
diff --git a/arch/mips/netlogic/xlr/platform.c b/arch/mips/netlogic/xlr/platform.c
index 7b96a91f477..4785932af24 100644
--- a/arch/mips/netlogic/xlr/platform.c
+++ b/arch/mips/netlogic/xlr/platform.c
@@ -23,7 +23,7 @@
#include <asm/netlogic/xlr/pic.h>
#include <asm/netlogic/xlr/xlr.h>
-unsigned int nlm_xlr_uart_in(struct uart_port *p, int offset)
+static unsigned int nlm_xlr_uart_in(struct uart_port *p, int offset)
{
uint64_t uartbase;
unsigned int value;
@@ -41,7 +41,7 @@ unsigned int nlm_xlr_uart_in(struct uart_port *p, int offset)
return value;
}
-void nlm_xlr_uart_out(struct uart_port *p, int offset, int value)
+static void nlm_xlr_uart_out(struct uart_port *p, int offset, int value)
{
uint64_t uartbase;
diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c
index 214d123b79f..d118b9aa764 100644
--- a/arch/mips/netlogic/xlr/setup.c
+++ b/arch/mips/netlogic/xlr/setup.c
@@ -60,25 +60,6 @@ unsigned int nlm_threads_per_core = 1;
struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
cpumask_t nlm_cpumask = CPU_MASK_CPU0;
-static void __init nlm_early_serial_setup(void)
-{
- struct uart_port s;
- unsigned long uart_base;
-
- uart_base = (unsigned long)nlm_mmio_base(NETLOGIC_IO_UART_0_OFFSET);
- memset(&s, 0, sizeof(s));
- s.flags = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST;
- s.iotype = UPIO_MEM32;
- s.regshift = 2;
- s.irq = PIC_UART_0_IRQ;
- s.uartclk = PIC_CLK_HZ;
- s.serial_in = nlm_xlr_uart_in;
- s.serial_out = nlm_xlr_uart_out;
- s.mapbase = uart_base;
- s.membase = (unsigned char __iomem *)uart_base;
- early_serial_setup(&s);
-}
-
static void nlm_linux_exit(void)
{
uint64_t gpiobase;
@@ -92,7 +73,6 @@ static void nlm_linux_exit(void)
void __init plat_mem_setup(void)
{
- panic_timeout = 5;
_machine_restart = (void (*)(char *))nlm_linux_exit;
_machine_halt = nlm_linux_exit;
pm_power_off = nlm_linux_exit;
@@ -215,7 +195,6 @@ void __init prom_init(void)
memcpy(reset_vec, (void *)nlm_reset_entry,
(nlm_reset_entry_end - nlm_reset_entry));
- nlm_early_serial_setup();
build_arcs_cmdline(argv);
prom_add_memory();
diff --git a/arch/mips/netlogic/xlr/wakeup.c b/arch/mips/netlogic/xlr/wakeup.c
index 9fb81fa6272..d61cba1e9c6 100644
--- a/arch/mips/netlogic/xlr/wakeup.c
+++ b/arch/mips/netlogic/xlr/wakeup.c
@@ -32,7 +32,6 @@
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/threads.h>
@@ -70,7 +69,7 @@ int xlr_wakeup_secondary_cpus(void)
/* Fill up the coremask early */
nodep->coremask = 1;
- for (i = 1; i < NLM_CORES_PER_NODE; i++) {
+ for (i = 1; i < nlm_cores_per_node(); i++) {
for (j = 1000000; j > 0; j--) {
if (cpu_ready[i * NLM_THREADS_PER_CORE])
break;
-rw-r--r--drivers/clk/at91/sckc.h22
-rw-r--r--drivers/clk/bcm/Kconfig9
-rw-r--r--drivers/clk/bcm/Makefile4
-rw-r--r--drivers/clk/bcm/clk-bcm21664.c290
-rw-r--r--drivers/clk/bcm/clk-bcm281xx.c375
-rw-r--r--drivers/clk/bcm/clk-kona-setup.c877
-rw-r--r--drivers/clk/bcm/clk-kona.c1281
-rw-r--r--drivers/clk/bcm/clk-kona.h516
-rw-r--r--drivers/clk/berlin/Makefile4
-rw-r--r--drivers/clk/berlin/berlin2-avpll.c393
-rw-r--r--drivers/clk/berlin/berlin2-avpll.h36
-rw-r--r--drivers/clk/berlin/berlin2-div.c265
-rw-r--r--drivers/clk/berlin/berlin2-div.h89
-rw-r--r--drivers/clk/berlin/berlin2-pll.c117
-rw-r--r--drivers/clk/berlin/berlin2-pll.h37
-rw-r--r--drivers/clk/berlin/bg2.c691
-rw-r--r--drivers/clk/berlin/bg2q.c389
-rw-r--r--drivers/clk/berlin/common.h29
-rw-r--r--drivers/clk/clk-axi-clkgen.c557
-rw-r--r--drivers/clk/clk-axm5516.c615
-rw-r--r--drivers/clk/clk-bcm2835.c60
-rw-r--r--drivers/clk/clk-composite.c238
-rw-r--r--drivers/clk/clk-devres.c55
-rw-r--r--drivers/clk/clk-divider.c465
-rw-r--r--drivers/clk/clk-efm32gg.c81
-rw-r--r--drivers/clk/clk-fixed-factor.c136
-rw-r--r--drivers/clk/clk-fixed-rate.c137
-rw-r--r--drivers/clk/clk-fractional-divider.c135
-rw-r--r--drivers/clk/clk-gate.c164
-rw-r--r--drivers/clk/clk-highbank.c342
-rw-r--r--drivers/clk/clk-ls1x.c111
-rw-r--r--drivers/clk/clk-max77686.c252
-rw-r--r--drivers/clk/clk-moxart.c97
-rw-r--r--drivers/clk/clk-mux.c179
-rw-r--r--drivers/clk/clk-nomadik.c579
-rw-r--r--drivers/clk/clk-nspire.c153
-rw-r--r--drivers/clk/clk-ppc-corenet.c307
-rw-r--r--drivers/clk/clk-s2mps11.c323
-rw-r--r--drivers/clk/clk-si5351.c1587
-rw-r--r--drivers/clk/clk-si5351.h170
-rw-r--r--drivers/clk/clk-si570.c531
-rw-r--r--drivers/clk/clk-twl6040.c126
-rw-r--r--drivers/clk/clk-u300.c1195
-rw-r--r--drivers/clk/clk-vt8500.c720
-rw-r--r--drivers/clk/clk-wm831x.c408
-rw-r--r--drivers/clk/clk-xgene.c521
-rw-r--r--drivers/clk/clk.c2626
-rw-r--r--drivers/clk/clk.h17
-rw-r--r--drivers/clk/clkdev.c359
-rw-r--r--drivers/clk/hisilicon/Makefile9
-rw-r--r--drivers/clk/hisilicon/clk-hi3620.c506
-rw-r--r--drivers/clk/hisilicon/clk-hip04.c58
-rw-r--r--drivers/clk/hisilicon/clk-hix5hd2.c101
-rw-r--r--drivers/clk/hisilicon/clk.c234
-rw-r--r--drivers/clk/hisilicon/clk.h111
-rw-r--r--drivers/clk/hisilicon/clkgate-separated.c130
-rw-r--r--drivers/clk/keystone/Makefile1
-rw-r--r--drivers/clk/keystone/gate.c269
-rw-r--r--drivers/clk/keystone/pll.c321
-rw-r--r--drivers/clk/mmp/Makefile9
-rw-r--r--drivers/clk/mmp/clk-apbc.c152
-rw-r--r--drivers/clk/mmp/clk-apmu.c97
-rw-r--r--drivers/clk/mmp/clk-frac.c157
-rw-r--r--drivers/clk/mmp/clk-mmp2.c462
-rw-r--r--drivers/clk/mmp/clk-pxa168.c358
-rw-r--r--drivers/clk/mmp/clk-pxa910.c329
-rw-r--r--drivers/clk/mmp/clk.h35
-rw-r--r--drivers/clk/mvebu/Kconfig40
-rw-r--r--drivers/clk/mvebu/Makefile11
-rw-r--r--drivers/clk/mvebu/armada-370.c175
-rw-r--r--drivers/clk/mvebu/armada-375.c184
-rw-r--r--drivers/clk/mvebu/armada-38x.c167
-rw-r--r--drivers/clk/mvebu/armada-xp.c208
-rw-r--r--drivers/clk/mvebu/clk-corediv.c315
-rw-r--r--drivers/clk/mvebu/clk-cpu.c178
-rw-r--r--drivers/clk/mvebu/common.c169
-rw-r--r--drivers/clk/mvebu/common.h48
-rw-r--r--drivers/clk/mvebu/dove.c193
-rw-r--r--drivers/clk/mvebu/kirkwood.c245
-rw-r--r--drivers/clk/mvebu/orion.c210
-rw-r--r--drivers/clk/mxs/Makefile8
-rw-r--r--drivers/clk/mxs/clk-div.c110
-rw-r--r--drivers/clk/mxs/clk-frac.c139
-rw-r--r--drivers/clk/mxs/clk-imx23.c177
-rw-r--r--drivers/clk/mxs/clk-imx28.c255
-rw-r--r--drivers/clk/mxs/clk-pll.c116
-rw-r--r--drivers/clk/mxs/clk-ref.c154
-rw-r--r--drivers/clk/mxs/clk-ssp.c62
-rw-r--r--drivers/clk/mxs/clk.c29
-rw-r--r--drivers/clk/mxs/clk.h66
-rw-r--r--drivers/clk/qcom/Kconfig47
-rw-r--r--drivers/clk/qcom/Makefile15
-rw-r--r--drivers/clk/qcom/clk-branch.c159
-rw-r--r--drivers/clk/qcom/clk-branch.h56
-rw-r--r--drivers/clk/qcom/clk-pll.c222
-rw-r--r--drivers/clk/qcom/clk-pll.h66
-rw-r--r--drivers/clk/qcom/clk-rcg.c517
-rw-r--r--drivers/clk/qcom/clk-rcg.h162
-rw-r--r--drivers/clk/qcom/clk-rcg2.c561
-rw-r--r--drivers/clk/qcom/clk-regmap.c114
-rw-r--r--drivers/clk/qcom/clk-regmap.h45
-rw-r--r--drivers/clk/qcom/common.c101
-rw-r--r--drivers/clk/qcom/common.h34
-rw-r--r--drivers/clk/qcom/gcc-msm8660.c2766
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c2964
-rw-r--r--drivers/clk/qcom/gcc-msm8974.c2759
-rw-r--r--drivers/clk/qcom/mmcc-msm8960.c2265
-rw-r--r--drivers/clk/qcom/mmcc-msm8974.c2583
-rw-r--r--drivers/clk/qcom/reset.c63
-rw-r--r--drivers/clk/qcom/reset.h37
-rw-r--r--drivers/clk/rockchip/Makefile5
-rw-r--r--drivers/clk/rockchip/clk-rockchip.c93
-rw-r--r--drivers/clk/samsung/Kconfig26
-rw-r--r--drivers/clk/samsung/Makefile18
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c248
-rw-r--r--drivers/clk/samsung/clk-exynos3250.c780
-rw-r--r--drivers/clk/samsung/clk-exynos4.c1274
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c826
-rw-r--r--drivers/clk/samsung/clk-exynos5260.c1980
-rw-r--r--drivers/clk/samsung/clk-exynos5260.h459
-rw-r--r--drivers/clk/samsung/clk-exynos5410.c209
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c1268
-rw-r--r--drivers/clk/samsung/clk-exynos5440.c129
-rw-r--r--drivers/clk/samsung/clk-pll.c1298
-rw-r--r--drivers/clk/samsung/clk-pll.h104
-rw-r--r--drivers/clk/samsung/clk-s3c2410-dclk.c440
-rw-r--r--drivers/clk/samsung/clk-s3c2410.c487
-rw-r--r--drivers/clk/samsung/clk-s3c2412.c274
-rw-r--r--drivers/clk/samsung/clk-s3c2443.c466
-rw-r--r--drivers/clk/samsung/clk-s3c64xx.c538
-rw-r--r--drivers/clk/samsung/clk.c314
-rw-r--r--drivers/clk/samsung/clk.h375
-rw-r--r--drivers/clk/shmobile/Makefile10
-rw-r--r--drivers/clk/shmobile/clk-div6.c185
-rw-r--r--drivers/clk/shmobile/clk-emev2.c104
-rw-r--r--drivers/clk/shmobile/clk-mstp.c238
-rw-r--r--drivers/clk/shmobile/clk-r8a7740.c199
-rw-r--r--drivers/clk/shmobile/clk-r8a7779.c180
-rw-r--r--drivers/clk/shmobile/clk-rcar-gen2.c338
-rw-r--r--drivers/clk/shmobile/clk-rz.c103
-rw-r--r--drivers/clk/sirf/Makefile5
-rw-r--r--drivers/clk/sirf/atlas6.h31
-rw-r--r--drivers/clk/sirf/clk-atlas6.c153
-rw-r--r--drivers/clk/sirf/clk-common.c1032
-rw-r--r--drivers/clk/sirf/clk-prima2.c152
-rw-r--r--drivers/clk/sirf/prima2.h25
-rw-r--r--drivers/clk/socfpga/Makefile4
-rw-r--r--drivers/clk/socfpga/clk-gate.c262
-rw-r--r--drivers/clk/socfpga/clk-periph.c110
-rw-r--r--drivers/clk/socfpga/clk-pll.c138
-rw-r--r--drivers/clk/socfpga/clk.c27
-rw-r--r--drivers/clk/socfpga/clk.h61
-rw-r--r--drivers/clk/spear/Makefile10
-rw-r--r--drivers/clk/spear/clk-aux-synth.c199
-rw-r--r--drivers/clk/spear/clk-frac-synth.c165
-rw-r--r--drivers/clk/spear/clk-gpt-synth.c154
-rw-r--r--drivers/clk/spear/clk-vco-pll.c363
-rw-r--r--drivers/clk/spear/clk.c39
-rw-r--r--drivers/clk/spear/clk.h134
-rw-r--r--drivers/clk/spear/spear1310_clock.c1128
-rw-r--r--drivers/clk/spear/spear1340_clock.c1020
-rw-r--r--drivers/clk/spear/spear3xx_clock.c669
-rw-r--r--drivers/clk/spear/spear6xx_clock.c343
-rw-r--r--drivers/clk/st/Makefile1
-rw-r--r--drivers/clk/st/clkgen-fsyn.c1039
-rw-r--r--drivers/clk/st/clkgen-mux.c820
-rw-r--r--drivers/clk/st/clkgen-pll.c701
-rw-r--r--drivers/clk/st/clkgen.h48
-rw-r--r--drivers/clk/sunxi/Makefile9
-rw-r--r--drivers/clk/sunxi/clk-a10-hosc.c73
-rw-r--r--drivers/clk/sunxi/clk-a20-gmac.c119
-rw-r--r--drivers/clk/sunxi/clk-factors.c155
-rw-r--r--drivers/clk/sunxi/clk-factors.h29
-rw-r--r--drivers/clk/sunxi/clk-sun6i-apb0-gates.c99
-rw-r--r--drivers/clk/sunxi/clk-sun6i-apb0.c77
-rw-r--r--drivers/clk/sunxi/clk-sun6i-ar100.c233
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c1238
-rw-r--r--drivers/clk/tegra/Makefile17
-rw-r--r--drivers/clk/tegra/clk-audio-sync.c87
-rw-r--r--drivers/clk/tegra/clk-divider.c187
-rw-r--r--drivers/clk/tegra/clk-id.h240
-rw-r--r--drivers/clk/tegra/clk-periph-gate.c170
-rw-r--r--drivers/clk/tegra/clk-periph.c206
-rw-r--r--drivers/clk/tegra/clk-pll-out.c123
-rw-r--r--drivers/clk/tegra/clk-pll.c1873
-rw-r--r--drivers/clk/tegra/clk-super.c166
-rw-r--r--drivers/clk/tegra/clk-tegra-audio.c215
-rw-r--r--drivers/clk/tegra/clk-tegra-fixed.c111
-rw-r--r--drivers/clk/tegra/clk-tegra-periph.c684
-rw-r--r--drivers/clk/tegra/clk-tegra-pmc.c132
-rw-r--r--drivers/clk/tegra/clk-tegra-super-gen4.c149
-rw-r--r--drivers/clk/tegra/clk-tegra114.c1486
-rw-r--r--drivers/clk/tegra/clk-tegra124.c1428
-rw-r--r--drivers/clk/tegra/clk-tegra20.c1131
-rw-r--r--drivers/clk/tegra/clk-tegra30.c1449
-rw-r--r--drivers/clk/tegra/clk.c299
-rw-r--r--drivers/clk/tegra/clk.h633
-rw-r--r--drivers/clk/ti/Makefile13
-rw-r--r--drivers/clk/ti/apll.c402
-rw-r--r--drivers/clk/ti/autoidle.c133
-rw-r--r--drivers/clk/ti/clk-2xxx.c256
-rw-r--r--drivers/clk/ti/clk-33xx.c160
-rw-r--r--drivers/clk/ti/clk-3xxx.c397
-rw-r--r--drivers/clk/ti/clk-43xx.c140
-rw-r--r--drivers/clk/ti/clk-44xx.c315
-rw-r--r--drivers/clk/ti/clk-54xx.c260
-rw-r--r--drivers/clk/ti/clk-7xx.c336
-rw-r--r--drivers/clk/ti/clk-dra7-atl.c312
-rw-r--r--drivers/clk/ti/clk.c167
-rw-r--r--drivers/clk/ti/clockdomain.c70
-rw-r--r--drivers/clk/ti/composite.c269
-rw-r--r--drivers/clk/ti/divider.c487
-rw-r--r--drivers/clk/ti/dpll.c627
-rw-r--r--drivers/clk/ti/fixed-factor.c66
-rw-r--r--drivers/clk/ti/gate.c249
-rw-r--r--drivers/clk/ti/interface.c136
-rw-r--r--drivers/clk/ti/mux.c246
-rw-r--r--drivers/clk/ux500/Makefile17
-rw-r--r--drivers/clk/ux500/abx500-clk.c138
-rw-r--r--drivers/clk/ux500/clk-prcc.c163
-rw-r--r--drivers/clk/ux500/clk-prcmu.c350
-rw-r--r--drivers/clk/ux500/clk-sysctrl.c227
-rw-r--r--drivers/clk/ux500/clk.h90
-rw-r--r--drivers/clk/ux500/u8500_clk.c525
-rw-r--r--drivers/clk/ux500/u8500_of_clk.c560
-rw-r--r--drivers/clk/ux500/u8540_clk.c579
-rw-r--r--drivers/clk/ux500/u9540_clk.c21
-rw-r--r--drivers/clk/versatile/Kconfig26
-rw-r--r--drivers/clk/versatile/Makefile8
-rw-r--r--drivers/clk/versatile/clk-icst.c164
-rw-r--r--drivers/clk/versatile/clk-icst.h20
-rw-r--r--drivers/clk/versatile/clk-impd1.c181
-rw-r--r--drivers/clk/versatile/clk-integrator.c69
-rw-r--r--drivers/clk/versatile/clk-realview.c95
-rw-r--r--drivers/clk/versatile/clk-sp810.c188
-rw-r--r--drivers/clk/versatile/clk-vexpress-osc.c137
-rw-r--r--drivers/clk/versatile/clk-vexpress.c86
-rw-r--r--drivers/clk/x86/Makefile2
-rw-r--r--drivers/clk/x86/clk-lpt.c53
-rw-r--r--drivers/clk/zynq/Makefile3
-rw-r--r--drivers/clk/zynq/clkc.c623
-rw-r--r--drivers/clk/zynq/pll.c244
259 files changed, 89462 insertions, 0 deletions
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
new file mode 100644
index 00000000000..9f9c5ae5359
--- /dev/null
+++ b/drivers/clk/Kconfig
@@ -0,0 +1,112 @@
+
+config CLKDEV_LOOKUP
+ bool
+ select HAVE_CLK
+
+config HAVE_CLK_PREPARE
+ bool
+
+config HAVE_MACH_CLKDEV
+ bool
+
+config COMMON_CLK
+ bool
+ select HAVE_CLK_PREPARE
+ select CLKDEV_LOOKUP
+ ---help---
+ The common clock framework is a single definition of struct
+ clk, useful across many platforms, as well as an
+ implementation of the clock API in include/linux/clk.h.
+ Architectures utilizing the common struct clk should select
+ this option.
+
+menu "Common Clock Framework"
+ depends on COMMON_CLK
+
+config COMMON_CLK_WM831X
+ tristate "Clock driver for WM831x/2x PMICs"
+ depends on MFD_WM831X
+ ---help---
+ Supports the clocking subsystem of the WM831x/2x series of
+ PMICs from Wolfson Microelectronics.
+
+source "drivers/clk/versatile/Kconfig"
+
+config COMMON_CLK_MAX77686
+ tristate "Clock driver for Maxim 77686 MFD"
+ depends on MFD_MAX77686
+ ---help---
+ This driver supports Maxim 77686 crystal oscillator clock.
+
+config COMMON_CLK_SI5351
+ tristate "Clock driver for SiLabs 5351A/B/C"
+ depends on I2C
+ select REGMAP_I2C
+ select RATIONAL
+ ---help---
+ This driver supports Silicon Labs 5351A/B/C programmable clock
+ generators.
+
+config COMMON_CLK_SI570
+ tristate "Clock driver for SiLabs 570 and compatible devices"
+ depends on I2C
+ depends on OF
+ select REGMAP_I2C
+ ---help---
+ This driver supports Silicon Labs 570/571/598/599 programmable
+ clock generators.
+
+config COMMON_CLK_S2MPS11
+ tristate "Clock driver for S2MPS1X/S5M8767 MFD"
+ depends on MFD_SEC_CORE
+ ---help---
+ This driver supports the S2MPS11/S2MPS14/S5M8767 crystal oscillator
+ clocks. These multi-function devices have two (S2MPS14) or three
+ (S2MPS11, S5M8767) fixed-rate oscillators, clocked at 32 kHz each.
+
+config CLK_TWL6040
+ tristate "External McPDM functional clock from twl6040"
+ depends on TWL6040_CORE
+ ---help---
+ Enable the external functional clock support on OMAP4+ platforms for
+ McPDM. The McPDM module uses the external bit clock on the McPDM bus
+ as its functional clock.
+
+config COMMON_CLK_AXI_CLKGEN
+ tristate "AXI clkgen driver"
+ depends on ARCH_ZYNQ || MICROBLAZE
+ ---help---
+ Support for the Analog Devices axi-clkgen pcore clock generator for Xilinx
+ FPGAs. It is commonly used in Analog Devices' reference designs.
+
+config CLK_PPC_CORENET
+ bool "Clock driver for PowerPC corenet platforms"
+ depends on PPC_E500MC && OF
+ ---help---
+ This adds clock driver support for Freescale PowerPC corenet
+ platforms using the common clock framework.
+
+config COMMON_CLK_XGENE
+ bool "Clock driver for APM XGene SoC"
+ default y
+ depends on ARM64
+ ---help---
+ Support for the APM X-Gene SoC reference, PLL, and device clocks.
+
+config COMMON_CLK_KEYSTONE
+ tristate "Clock drivers for Keystone based SOCs"
+ depends on ARCH_KEYSTONE && OF
+ ---help---
+ Supports clock drivers for Keystone based SoCs. These SoCs have a
+ local power-sleep control module that gates the clocks to the IPs and PLLs.
+
+source "drivers/clk/qcom/Kconfig"
+
+endmenu
+
+source "drivers/clk/bcm/Kconfig"
+source "drivers/clk/mvebu/Kconfig"
+
+source "drivers/clk/samsung/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
new file mode 100644
index 00000000000..567f1025902
--- /dev/null
+++ b/drivers/clk/Makefile
@@ -0,0 +1,60 @@
+# common clock types
+obj-$(CONFIG_HAVE_CLK) += clk-devres.o
+obj-$(CONFIG_CLKDEV_LOOKUP) += clkdev.o
+obj-$(CONFIG_COMMON_CLK) += clk.o
+obj-$(CONFIG_COMMON_CLK) += clk-divider.o
+obj-$(CONFIG_COMMON_CLK) += clk-fixed-factor.o
+obj-$(CONFIG_COMMON_CLK) += clk-fixed-rate.o
+obj-$(CONFIG_COMMON_CLK) += clk-gate.o
+obj-$(CONFIG_COMMON_CLK) += clk-mux.o
+obj-$(CONFIG_COMMON_CLK) += clk-composite.o
+obj-$(CONFIG_COMMON_CLK) += clk-fractional-divider.o
+
+# hardware specific clock types
+# please keep this section sorted lexicographically by file/directory path name
+obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN) += clk-axi-clkgen.o
+obj-$(CONFIG_ARCH_AXXIA) += clk-axm5516.o
+obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835.o
+obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o
+obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
+obj-$(CONFIG_MACH_LOONGSON1) += clk-ls1x.o
+obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
+obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o
+obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
+obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o
+obj-$(CONFIG_CLK_PPC_CORENET) += clk-ppc-corenet.o
+obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o
+obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o
+obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o
+obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
+obj-$(CONFIG_ARCH_U300) += clk-u300.o
+obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
+obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
+obj-$(CONFIG_COMMON_CLK_XGENE) += clk-xgene.o
+obj-$(CONFIG_COMMON_CLK_AT91) += at91/
+obj-$(CONFIG_ARCH_BCM_MOBILE) += bcm/
+obj-$(CONFIG_ARCH_BERLIN) += berlin/
+obj-$(CONFIG_ARCH_HI3xxx) += hisilicon/
+obj-$(CONFIG_ARCH_HIP04) += hisilicon/
+obj-$(CONFIG_ARCH_HIX5HD2) += hisilicon/
+obj-$(CONFIG_COMMON_CLK_KEYSTONE) += keystone/
+ifeq ($(CONFIG_COMMON_CLK), y)
+obj-$(CONFIG_ARCH_MMP) += mmp/
+endif
+obj-$(CONFIG_PLAT_ORION) += mvebu/
+obj-$(CONFIG_ARCH_MXS) += mxs/
+obj-$(CONFIG_COMMON_CLK_QCOM) += qcom/
+obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
+obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/
+obj-$(CONFIG_ARCH_SHMOBILE_MULTI) += shmobile/
+obj-$(CONFIG_ARCH_SIRF) += sirf/
+obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/
+obj-$(CONFIG_PLAT_SPEAR) += spear/
+obj-$(CONFIG_ARCH_STI) += st/
+obj-$(CONFIG_ARCH_SUNXI) += sunxi/
+obj-$(CONFIG_ARCH_TEGRA) += tegra/
+obj-$(CONFIG_ARCH_OMAP2PLUS) += ti/
+obj-$(CONFIG_ARCH_U8500) += ux500/
+obj-$(CONFIG_COMMON_CLK_VERSATILE) += versatile/
+obj-$(CONFIG_X86) += x86/
+obj-$(CONFIG_ARCH_ZYNQ) += zynq/
diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile
new file mode 100644
index 00000000000..4998aee5926
--- /dev/null
+++ b/drivers/clk/at91/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for at91 specific clk
+#
+
+obj-y += pmc.o sckc.o
+obj-y += clk-slow.o clk-main.o clk-pll.o clk-plldiv.o clk-master.o
+obj-y += clk-system.o clk-peripheral.o clk-programmable.o
+
+obj-$(CONFIG_HAVE_AT91_UTMI) += clk-utmi.o
+obj-$(CONFIG_HAVE_AT91_USB_CLK) += clk-usb.o
+obj-$(CONFIG_HAVE_AT91_SMD) += clk-smd.o
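
Every at91 object listed above follows the same provider pattern that clk-main.c (next) repeats for each clock type: fill a struct clk_init_data, register a struct clk_hw with clk_register(), and free the wrapper on failure. A minimal sketch with placeholder names, not an excerpt from this patch:

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Placeholder ops; real drivers fill in prepare/recalc_rate/etc. */
static const struct clk_ops example_ops;

static struct clk *example_clk_register(const char *name, const char *parent_name)
{
	struct clk_hw *hw;
	struct clk_init_data init;
	struct clk *clk;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &example_ops;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.num_parents = parent_name ? 1 : 0;
	init.flags = 0;
	hw->init = &init;

	clk = clk_register(NULL, hw);		/* hand the hw to the clk core */
	if (IS_ERR(clk))
		kfree(hw);			/* registration failed: clean up */

	return clk;
}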
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
new file mode 100644
index 00000000000..733306131b9
--- /dev/null
+++ b/drivers/clk/at91/clk-main.c
@@ -0,0 +1,638 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#include "pmc.h"
+
+#define SLOW_CLOCK_FREQ 32768
+#define MAINF_DIV 16
+#define MAINFRDY_TIMEOUT (((MAINF_DIV + 1) * USEC_PER_SEC) / \
+ SLOW_CLOCK_FREQ)
+#define MAINF_LOOP_MIN_WAIT (USEC_PER_SEC / SLOW_CLOCK_FREQ)
+#define MAINF_LOOP_MAX_WAIT MAINFRDY_TIMEOUT
+
+#define MOR_KEY_MASK (0xff << 16)
+
+struct clk_main_osc {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ unsigned int irq;
+ wait_queue_head_t wait;
+};
+
+#define to_clk_main_osc(hw) container_of(hw, struct clk_main_osc, hw)
+
+struct clk_main_rc_osc {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ unsigned int irq;
+ wait_queue_head_t wait;
+ unsigned long frequency;
+ unsigned long accuracy;
+};
+
+#define to_clk_main_rc_osc(hw) container_of(hw, struct clk_main_rc_osc, hw)
+
+struct clk_rm9200_main {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+};
+
+#define to_clk_rm9200_main(hw) container_of(hw, struct clk_rm9200_main, hw)
+
+struct clk_sam9x5_main {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ unsigned int irq;
+ wait_queue_head_t wait;
+ u8 parent;
+};
+
+#define to_clk_sam9x5_main(hw) container_of(hw, struct clk_sam9x5_main, hw)
+
+static irqreturn_t clk_main_osc_irq_handler(int irq, void *dev_id)
+{
+ struct clk_main_osc *osc = dev_id;
+
+ wake_up(&osc->wait);
+ disable_irq_nosync(osc->irq);
+
+ return IRQ_HANDLED;
+}
+
+static int clk_main_osc_prepare(struct clk_hw *hw)
+{
+ struct clk_main_osc *osc = to_clk_main_osc(hw);
+ struct at91_pmc *pmc = osc->pmc;
+ u32 tmp;
+
+ tmp = pmc_read(pmc, AT91_CKGR_MOR) & ~MOR_KEY_MASK;
+ if (tmp & AT91_PMC_OSCBYPASS)
+ return 0;
+
+ if (!(tmp & AT91_PMC_MOSCEN)) {
+ tmp |= AT91_PMC_MOSCEN | AT91_PMC_KEY;
+ pmc_write(pmc, AT91_CKGR_MOR, tmp);
+ }
+
+ while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS)) {
+ enable_irq(osc->irq);
+ wait_event(osc->wait,
+ pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS);
+ }
+
+ return 0;
+}
+
+static void clk_main_osc_unprepare(struct clk_hw *hw)
+{
+ struct clk_main_osc *osc = to_clk_main_osc(hw);
+ struct at91_pmc *pmc = osc->pmc;
+ u32 tmp = pmc_read(pmc, AT91_CKGR_MOR);
+
+ if (tmp & AT91_PMC_OSCBYPASS)
+ return;
+
+ if (!(tmp & AT91_PMC_MOSCEN))
+ return;
+
+ tmp &= ~(AT91_PMC_KEY | AT91_PMC_MOSCEN);
+ pmc_write(pmc, AT91_CKGR_MOR, tmp | AT91_PMC_KEY);
+}
+
+static int clk_main_osc_is_prepared(struct clk_hw *hw)
+{
+ struct clk_main_osc *osc = to_clk_main_osc(hw);
+ struct at91_pmc *pmc = osc->pmc;
+ u32 tmp = pmc_read(pmc, AT91_CKGR_MOR);
+
+ if (tmp & AT91_PMC_OSCBYPASS)
+ return 1;
+
+ return !!((pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS) &&
+ (pmc_read(pmc, AT91_CKGR_MOR) & AT91_PMC_MOSCEN));
+}
+
+static const struct clk_ops main_osc_ops = {
+ .prepare = clk_main_osc_prepare,
+ .unprepare = clk_main_osc_unprepare,
+ .is_prepared = clk_main_osc_is_prepared,
+};
+
+static struct clk * __init
+at91_clk_register_main_osc(struct at91_pmc *pmc,
+ unsigned int irq,
+ const char *name,
+ const char *parent_name,
+ bool bypass)
+{
+ int ret;
+ struct clk_main_osc *osc;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!pmc || !irq || !name || !parent_name)
+ return ERR_PTR(-EINVAL);
+
+ osc = kzalloc(sizeof(*osc), GFP_KERNEL);
+ if (!osc)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &main_osc_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = CLK_IGNORE_UNUSED;
+
+ osc->hw.init = &init;
+ osc->pmc = pmc;
+ osc->irq = irq;
+
+ init_waitqueue_head(&osc->wait);
+ irq_set_status_flags(osc->irq, IRQ_NOAUTOEN);
+ ret = request_irq(osc->irq, clk_main_osc_irq_handler,
+ IRQF_TRIGGER_HIGH, name, osc);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (bypass)
+ pmc_write(pmc, AT91_CKGR_MOR,
+ (pmc_read(pmc, AT91_CKGR_MOR) &
+ ~(MOR_KEY_MASK | AT91_PMC_MOSCEN)) |
+ AT91_PMC_OSCBYPASS | AT91_PMC_KEY);
+
+ clk = clk_register(NULL, &osc->hw);
+ if (IS_ERR(clk)) {
+ free_irq(irq, osc);
+ kfree(osc);
+ }
+
+ return clk;
+}
+
+void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ struct clk *clk;
+ unsigned int irq;
+ const char *name = np->name;
+ const char *parent_name;
+ bool bypass;
+
+ of_property_read_string(np, "clock-output-names", &name);
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+ parent_name = of_clk_get_parent_name(np, 0);
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq)
+ return;
+
+ clk = at91_clk_register_main_osc(pmc, irq, name, parent_name, bypass);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+static irqreturn_t clk_main_rc_osc_irq_handler(int irq, void *dev_id)
+{
+ struct clk_main_rc_osc *osc = dev_id;
+
+ wake_up(&osc->wait);
+ disable_irq_nosync(osc->irq);
+
+ return IRQ_HANDLED;
+}
+
+static int clk_main_rc_osc_prepare(struct clk_hw *hw)
+{
+ struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw);
+ struct at91_pmc *pmc = osc->pmc;
+ u32 tmp;
+
+ tmp = pmc_read(pmc, AT91_CKGR_MOR) & ~MOR_KEY_MASK;
+
+ if (!(tmp & AT91_PMC_MOSCRCEN)) {
+ tmp |= AT91_PMC_MOSCRCEN | AT91_PMC_KEY;
+ pmc_write(pmc, AT91_CKGR_MOR, tmp);
+ }
+
+ while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCRCS)) {
+ enable_irq(osc->irq);
+ wait_event(osc->wait,
+ pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCRCS);
+ }
+
+ return 0;
+}
+
+static void clk_main_rc_osc_unprepare(struct clk_hw *hw)
+{
+ struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw);
+ struct at91_pmc *pmc = osc->pmc;
+ u32 tmp = pmc_read(pmc, AT91_CKGR_MOR);
+
+ if (!(tmp & AT91_PMC_MOSCRCEN))
+ return;
+
+ tmp &= ~(MOR_KEY_MASK | AT91_PMC_MOSCRCEN);
+ pmc_write(pmc, AT91_CKGR_MOR, tmp | AT91_PMC_KEY);
+}
+
+static int clk_main_rc_osc_is_prepared(struct clk_hw *hw)
+{
+ struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw);
+ struct at91_pmc *pmc = osc->pmc;
+
+ return !!((pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCRCS) &&
+ (pmc_read(pmc, AT91_CKGR_MOR) & AT91_PMC_MOSCRCEN));
+}
+
+static unsigned long clk_main_rc_osc_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw);
+
+ return osc->frequency;
+}
+
+static unsigned long clk_main_rc_osc_recalc_accuracy(struct clk_hw *hw,
+ unsigned long parent_acc)
+{
+ struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw);
+
+ return osc->accuracy;
+}
+
+static const struct clk_ops main_rc_osc_ops = {
+ .prepare = clk_main_rc_osc_prepare,
+ .unprepare = clk_main_rc_osc_unprepare,
+ .is_prepared = clk_main_rc_osc_is_prepared,
+ .recalc_rate = clk_main_rc_osc_recalc_rate,
+ .recalc_accuracy = clk_main_rc_osc_recalc_accuracy,
+};
+
+static struct clk * __init
+at91_clk_register_main_rc_osc(struct at91_pmc *pmc,
+ unsigned int irq,
+ const char *name,
+ u32 frequency, u32 accuracy)
+{
+ int ret;
+ struct clk_main_rc_osc *osc;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!pmc || !irq || !name || !frequency)
+ return ERR_PTR(-EINVAL);
+
+ osc = kzalloc(sizeof(*osc), GFP_KERNEL);
+ if (!osc)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &main_rc_osc_ops;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ init.flags = CLK_IS_ROOT | CLK_IGNORE_UNUSED;
+
+ osc->hw.init = &init;
+ osc->pmc = pmc;
+ osc->irq = irq;
+ osc->frequency = frequency;
+ osc->accuracy = accuracy;
+
+ init_waitqueue_head(&osc->wait);
+ irq_set_status_flags(osc->irq, IRQ_NOAUTOEN);
+ ret = request_irq(osc->irq, clk_main_rc_osc_irq_handler,
+ IRQF_TRIGGER_HIGH, name, osc);
+ if (ret)
+ return ERR_PTR(ret);
+
+ clk = clk_register(NULL, &osc->hw);
+ if (IS_ERR(clk)) {
+ free_irq(irq, osc);
+ kfree(osc);
+ }
+
+ return clk;
+}
+
+void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ struct clk *clk;
+ unsigned int irq;
+ u32 frequency = 0;
+ u32 accuracy = 0;
+ const char *name = np->name;
+
+ of_property_read_string(np, "clock-output-names", &name);
+ of_property_read_u32(np, "clock-frequency", &frequency);
+ of_property_read_u32(np, "clock-accuracy", &accuracy);
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq)
+ return;
+
+ clk = at91_clk_register_main_rc_osc(pmc, irq, name, frequency,
+ accuracy);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+
+static int clk_main_probe_frequency(struct at91_pmc *pmc)
+{
+ unsigned long prep_time, timeout;
+ u32 tmp;
+
+ timeout = jiffies + usecs_to_jiffies(MAINFRDY_TIMEOUT);
+ do {
+ prep_time = jiffies;
+ tmp = pmc_read(pmc, AT91_CKGR_MCFR);
+ if (tmp & AT91_PMC_MAINRDY)
+ return 0;
+ usleep_range(MAINF_LOOP_MIN_WAIT, MAINF_LOOP_MAX_WAIT);
+ } while (time_before(prep_time, timeout));
+
+ return -ETIMEDOUT;
+}
+
+static unsigned long clk_main_recalc_rate(struct at91_pmc *pmc,
+ unsigned long parent_rate)
+{
+ u32 tmp;
+
+ if (parent_rate)
+ return parent_rate;
+
+ tmp = pmc_read(pmc, AT91_CKGR_MCFR);
+ if (!(tmp & AT91_PMC_MAINRDY))
+ return 0;
+
+ return ((tmp & AT91_PMC_MAINF) * SLOW_CLOCK_FREQ) / MAINF_DIV;
+}
+
+static int clk_rm9200_main_prepare(struct clk_hw *hw)
+{
+ struct clk_rm9200_main *clkmain = to_clk_rm9200_main(hw);
+
+ return clk_main_probe_frequency(clkmain->pmc);
+}
+
+static int clk_rm9200_main_is_prepared(struct clk_hw *hw)
+{
+ struct clk_rm9200_main *clkmain = to_clk_rm9200_main(hw);
+
+ return !!(pmc_read(clkmain->pmc, AT91_CKGR_MCFR) & AT91_PMC_MAINRDY);
+}
+
+static unsigned long clk_rm9200_main_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_rm9200_main *clkmain = to_clk_rm9200_main(hw);
+
+ return clk_main_recalc_rate(clkmain->pmc, parent_rate);
+}
+
+static const struct clk_ops rm9200_main_ops = {
+ .prepare = clk_rm9200_main_prepare,
+ .is_prepared = clk_rm9200_main_is_prepared,
+ .recalc_rate = clk_rm9200_main_recalc_rate,
+};
+
+static struct clk * __init
+at91_clk_register_rm9200_main(struct at91_pmc *pmc,
+ const char *name,
+ const char *parent_name)
+{
+ struct clk_rm9200_main *clkmain;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!pmc || !name)
+ return ERR_PTR(-EINVAL);
+
+ if (!parent_name)
+ return ERR_PTR(-EINVAL);
+
+ clkmain = kzalloc(sizeof(*clkmain), GFP_KERNEL);
+ if (!clkmain)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &rm9200_main_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = 0;
+
+ clkmain->hw.init = &init;
+ clkmain->pmc = pmc;
+
+ clk = clk_register(NULL, &clkmain->hw);
+ if (IS_ERR(clk))
+ kfree(clkmain);
+
+ return clk;
+}
+
+void __init of_at91rm9200_clk_main_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ struct clk *clk;
+ const char *parent_name;
+ const char *name = np->name;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ of_property_read_string(np, "clock-output-names", &name);
+
+ clk = at91_clk_register_rm9200_main(pmc, name, parent_name);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+static irqreturn_t clk_sam9x5_main_irq_handler(int irq, void *dev_id)
+{
+ struct clk_sam9x5_main *clkmain = dev_id;
+
+ wake_up(&clkmain->wait);
+ disable_irq_nosync(clkmain->irq);
+
+ return IRQ_HANDLED;
+}
+
+static int clk_sam9x5_main_prepare(struct clk_hw *hw)
+{
+ struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
+ struct at91_pmc *pmc = clkmain->pmc;
+
+ while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS)) {
+ enable_irq(clkmain->irq);
+ wait_event(clkmain->wait,
+ pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS);
+ }
+
+ return clk_main_probe_frequency(pmc);
+}
+
+static int clk_sam9x5_main_is_prepared(struct clk_hw *hw)
+{
+ struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
+
+ return !!(pmc_read(clkmain->pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS);
+}
+
+static unsigned long clk_sam9x5_main_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
+
+ return clk_main_recalc_rate(clkmain->pmc, parent_rate);
+}
+
+static int clk_sam9x5_main_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
+ struct at91_pmc *pmc = clkmain->pmc;
+ u32 tmp;
+
+ if (index > 1)
+ return -EINVAL;
+
+ tmp = pmc_read(pmc, AT91_CKGR_MOR) & ~MOR_KEY_MASK;
+
+ if (index && !(tmp & AT91_PMC_MOSCSEL))
+ pmc_write(pmc, AT91_CKGR_MOR, tmp | AT91_PMC_MOSCSEL);
+ else if (!index && (tmp & AT91_PMC_MOSCSEL))
+ pmc_write(pmc, AT91_CKGR_MOR, tmp & ~AT91_PMC_MOSCSEL);
+
+ while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS)) {
+ enable_irq(clkmain->irq);
+ wait_event(clkmain->wait,
+ pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS);
+ }
+
+ return 0;
+}
+
+static u8 clk_sam9x5_main_get_parent(struct clk_hw *hw)
+{
+ struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
+
+ return !!(pmc_read(clkmain->pmc, AT91_CKGR_MOR) & AT91_PMC_MOSCEN);
+}
+
+static const struct clk_ops sam9x5_main_ops = {
+ .prepare = clk_sam9x5_main_prepare,
+ .is_prepared = clk_sam9x5_main_is_prepared,
+ .recalc_rate = clk_sam9x5_main_recalc_rate,
+ .set_parent = clk_sam9x5_main_set_parent,
+ .get_parent = clk_sam9x5_main_get_parent,
+};
+
+static struct clk * __init
+at91_clk_register_sam9x5_main(struct at91_pmc *pmc,
+ unsigned int irq,
+ const char *name,
+ const char **parent_names,
+ int num_parents)
+{
+ int ret;
+ struct clk_sam9x5_main *clkmain;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!pmc || !irq || !name)
+ return ERR_PTR(-EINVAL);
+
+ if (!parent_names || !num_parents)
+ return ERR_PTR(-EINVAL);
+
+ clkmain = kzalloc(sizeof(*clkmain), GFP_KERNEL);
+ if (!clkmain)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &sam9x5_main_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ init.flags = CLK_SET_PARENT_GATE;
+
+ clkmain->hw.init = &init;
+ clkmain->pmc = pmc;
+ clkmain->irq = irq;
+ clkmain->parent = !!(pmc_read(clkmain->pmc, AT91_CKGR_MOR) &
+ AT91_PMC_MOSCEN);
+ init_waitqueue_head(&clkmain->wait);
+ irq_set_status_flags(clkmain->irq, IRQ_NOAUTOEN);
+ ret = request_irq(clkmain->irq, clk_sam9x5_main_irq_handler,
+ IRQF_TRIGGER_HIGH, name, clkmain);
+ if (ret)
+ return ERR_PTR(ret);
+
+ clk = clk_register(NULL, &clkmain->hw);
+ if (IS_ERR(clk)) {
+ free_irq(clkmain->irq, clkmain);
+ kfree(clkmain);
+ }
+
+ return clk;
+}
+
+void __init of_at91sam9x5_clk_main_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ struct clk *clk;
+ const char *parent_names[2];
+ int num_parents;
+ unsigned int irq;
+ const char *name = np->name;
+ int i;
+
+ num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+ if (num_parents <= 0 || num_parents > 2)
+ return;
+
+ for (i = 0; i < num_parents; ++i) {
+ parent_names[i] = of_clk_get_parent_name(np, i);
+ if (!parent_names[i])
+ return;
+ }
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq)
+ return;
+
+ clk = at91_clk_register_sam9x5_main(pmc, irq, name, parent_names,
+ num_parents);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
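
clk_main_recalc_rate() above derives the main clock rate from the MAINF field, which counts main-clock cycles over MAINF_DIV (16) slow-clock periods. A stand-alone sketch of that arithmetic, assuming a 12 MHz crystal (the crystal value is an assumption, not taken from this patch):

#include <stdio.h>

#define SLOW_CLOCK_FREQ	32768
#define MAINF_DIV	16

int main(void)
{
	unsigned long crystal = 12000000;	/* assumed main crystal rate */
	/* MAINF counts main-clock cycles seen during 16 slow-clock periods */
	unsigned long mainf = crystal * MAINF_DIV / SLOW_CLOCK_FREQ;	/* ~5859 */
	/* clk_main_recalc_rate() inverts that measurement */
	unsigned long rate = mainf * SLOW_CLOCK_FREQ / MAINF_DIV;	/* ~11999232 Hz */

	printf("MAINF=%lu -> %lu Hz\n", mainf, rate);
	return 0;
}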
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
new file mode 100644
index 00000000000..c1af80bcdf2
--- /dev/null
+++ b/drivers/clk/at91/clk-master.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include "pmc.h"
+
+#define MASTER_SOURCE_MAX 4
+
+#define MASTER_PRES_MASK 0x7
+#define MASTER_PRES_MAX MASTER_PRES_MASK
+#define MASTER_DIV_SHIFT 8
+#define MASTER_DIV_MASK 0x3
+
+struct clk_master_characteristics {
+ struct clk_range output;
+ u32 divisors[4];
+ u8 have_div3_pres;
+};
+
+struct clk_master_layout {
+ u32 mask;
+ u8 pres_shift;
+};
+
+#define to_clk_master(hw) container_of(hw, struct clk_master, hw)
+
+struct clk_master {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ unsigned int irq;
+ wait_queue_head_t wait;
+ const struct clk_master_layout *layout;
+ const struct clk_master_characteristics *characteristics;
+};
+
+static irqreturn_t clk_master_irq_handler(int irq, void *dev_id)
+{
+ struct clk_master *master = (struct clk_master *)dev_id;
+
+ wake_up(&master->wait);
+ disable_irq_nosync(master->irq);
+
+ return IRQ_HANDLED;
+}
+static int clk_master_prepare(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+ struct at91_pmc *pmc = master->pmc;
+
+ while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MCKRDY)) {
+ enable_irq(master->irq);
+ wait_event(master->wait,
+ pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MCKRDY);
+ }
+
+ return 0;
+}
+
+static int clk_master_is_prepared(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+
+ return !!(pmc_read(master->pmc, AT91_PMC_SR) & AT91_PMC_MCKRDY);
+}
+
+static unsigned long clk_master_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ u8 pres;
+ u8 div;
+ unsigned long rate = parent_rate;
+ struct clk_master *master = to_clk_master(hw);
+ struct at91_pmc *pmc = master->pmc;
+ const struct clk_master_layout *layout = master->layout;
+ const struct clk_master_characteristics *characteristics =
+ master->characteristics;
+ u32 tmp;
+
+ pmc_lock(pmc);
+ tmp = pmc_read(pmc, AT91_PMC_MCKR) & layout->mask;
+ pmc_unlock(pmc);
+
+ pres = (tmp >> layout->pres_shift) & MASTER_PRES_MASK;
+ div = (tmp >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
+
+ if (characteristics->have_div3_pres && pres == MASTER_PRES_MAX)
+ rate /= 3;
+ else
+ rate >>= pres;
+
+ rate /= characteristics->divisors[div];
+
+ if (rate < characteristics->output.min)
+ pr_warn("master clk is underclocked");
+ else if (rate > characteristics->output.max)
+ pr_warn("master clk is overclocked");
+
+ return rate;
+}
+
+static u8 clk_master_get_parent(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+ struct at91_pmc *pmc = master->pmc;
+
+ return pmc_read(pmc, AT91_PMC_MCKR) & AT91_PMC_CSS;
+}
+
+static const struct clk_ops master_ops = {
+ .prepare = clk_master_prepare,
+ .is_prepared = clk_master_is_prepared,
+ .recalc_rate = clk_master_recalc_rate,
+ .get_parent = clk_master_get_parent,
+};
+
+static struct clk * __init
+at91_clk_register_master(struct at91_pmc *pmc, unsigned int irq,
+ const char *name, int num_parents,
+ const char **parent_names,
+ const struct clk_master_layout *layout,
+ const struct clk_master_characteristics *characteristics)
+{
+ int ret;
+ struct clk_master *master;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!pmc || !irq || !name || !num_parents || !parent_names)
+ return ERR_PTR(-EINVAL);
+
+ master = kzalloc(sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &master_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ init.flags = 0;
+
+ master->hw.init = &init;
+ master->layout = layout;
+ master->characteristics = characteristics;
+ master->pmc = pmc;
+ master->irq = irq;
+ init_waitqueue_head(&master->wait);
+ irq_set_status_flags(master->irq, IRQ_NOAUTOEN);
+ ret = request_irq(master->irq, clk_master_irq_handler,
+ IRQF_TRIGGER_HIGH, "clk-master", master);
+ if (ret)
+ return ERR_PTR(ret);
+
+ clk = clk_register(NULL, &master->hw);
+ if (IS_ERR(clk))
+ kfree(master);
+
+ return clk;
+}
+
+
+static const struct clk_master_layout at91rm9200_master_layout = {
+ .mask = 0x31F,
+ .pres_shift = 2,
+};
+
+static const struct clk_master_layout at91sam9x5_master_layout = {
+ .mask = 0x373,
+ .pres_shift = 4,
+};
+
+
+static struct clk_master_characteristics * __init
+of_at91_clk_master_get_characteristics(struct device_node *np)
+{
+ struct clk_master_characteristics *characteristics;
+
+ characteristics = kzalloc(sizeof(*characteristics), GFP_KERNEL);
+ if (!characteristics)
+ return NULL;
+
+ if (of_at91_get_clk_range(np, "atmel,clk-output-range", &characteristics->output))
+ goto out_free_characteristics;
+
+ of_property_read_u32_array(np, "atmel,clk-divisors",
+ characteristics->divisors, 4);
+
+ characteristics->have_div3_pres =
+ of_property_read_bool(np, "atmel,master-clk-have-div3-pres");
+
+ return characteristics;
+
+out_free_characteristics:
+ kfree(characteristics);
+ return NULL;
+}
+
+static void __init
+of_at91_clk_master_setup(struct device_node *np, struct at91_pmc *pmc,
+ const struct clk_master_layout *layout)
+{
+ struct clk *clk;
+ int num_parents;
+ int i;
+ unsigned int irq;
+ const char *parent_names[MASTER_SOURCE_MAX];
+ const char *name = np->name;
+ struct clk_master_characteristics *characteristics;
+
+ num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+ if (num_parents <= 0 || num_parents > MASTER_SOURCE_MAX)
+ return;
+
+ for (i = 0; i < num_parents; ++i) {
+ parent_names[i] = of_clk_get_parent_name(np, i);
+ if (!parent_names[i])
+ return;
+ }
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ characteristics = of_at91_clk_master_get_characteristics(np);
+ if (!characteristics)
+ return;
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq)
+ goto out_free_characteristics;
+
+ clk = at91_clk_register_master(pmc, irq, name, num_parents,
+ parent_names, layout,
+ characteristics);
+ if (IS_ERR(clk))
+ goto out_free_characteristics;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ return;
+
+out_free_characteristics:
+ kfree(characteristics);
+}
+
+void __init of_at91rm9200_clk_master_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_master_setup(np, pmc, &at91rm9200_master_layout);
+}
+
+void __init of_at91sam9x5_clk_master_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_master_setup(np, pmc, &at91sam9x5_master_layout);
+}
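
clk_master_recalc_rate() above applies a power-of-two prescaler (or a divide-by-3 on parts with the div3 prescaler option) followed by a table-driven divisor. A stand-alone sketch with assumed example values (the 800 MHz parent and the divisor table are illustrative, not taken from a specific SoC):

#include <stdio.h>

int main(void)
{
	unsigned long parent_rate = 800000000;		/* assumed PLL output */
	unsigned int divisors[4] = { 1, 2, 4, 3 };	/* hypothetical MCKR divisor table */
	unsigned int pres = 1;				/* prescaler field from MCKR */
	unsigned int div = 2;				/* divisor field from MCKR */

	unsigned long rate = parent_rate >> pres;	/* 400000000 Hz */
	rate /= divisors[div];				/* 100000000 Hz */

	printf("master clk = %lu Hz\n", rate);
	return 0;
}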
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
new file mode 100644
index 00000000000..597fed423d7
--- /dev/null
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -0,0 +1,410 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+
+#include "pmc.h"
+
+#define PERIPHERAL_MAX 64
+
+#define PERIPHERAL_AT91RM9200 0
+#define PERIPHERAL_AT91SAM9X5 1
+
+#define PERIPHERAL_ID_MIN 2
+#define PERIPHERAL_ID_MAX 31
+#define PERIPHERAL_MASK(id) (1 << ((id) & PERIPHERAL_ID_MAX))
+
+#define PERIPHERAL_RSHIFT_MASK 0x3
+#define PERIPHERAL_RSHIFT(val) (((val) >> 16) & PERIPHERAL_RSHIFT_MASK)
+
+#define PERIPHERAL_MAX_SHIFT 4
+
+struct clk_peripheral {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ u32 id;
+};
+
+#define to_clk_peripheral(hw) container_of(hw, struct clk_peripheral, hw)
+
+struct clk_sam9x5_peripheral {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ struct clk_range range;
+ u32 id;
+ u32 div;
+ bool auto_div;
+};
+
+#define to_clk_sam9x5_peripheral(hw) \
+ container_of(hw, struct clk_sam9x5_peripheral, hw)
+
+static int clk_peripheral_enable(struct clk_hw *hw)
+{
+ struct clk_peripheral *periph = to_clk_peripheral(hw);
+ struct at91_pmc *pmc = periph->pmc;
+ int offset = AT91_PMC_PCER;
+ u32 id = periph->id;
+
+ if (id < PERIPHERAL_ID_MIN)
+ return 0;
+ if (id > PERIPHERAL_ID_MAX)
+ offset = AT91_PMC_PCER1;
+ pmc_write(pmc, offset, PERIPHERAL_MASK(id));
+ return 0;
+}
+
+static void clk_peripheral_disable(struct clk_hw *hw)
+{
+ struct clk_peripheral *periph = to_clk_peripheral(hw);
+ struct at91_pmc *pmc = periph->pmc;
+ int offset = AT91_PMC_PCDR;
+ u32 id = periph->id;
+
+ if (id < PERIPHERAL_ID_MIN)
+ return;
+ if (id > PERIPHERAL_ID_MAX)
+ offset = AT91_PMC_PCDR1;
+ pmc_write(pmc, offset, PERIPHERAL_MASK(id));
+}
+
+static int clk_peripheral_is_enabled(struct clk_hw *hw)
+{
+ struct clk_peripheral *periph = to_clk_peripheral(hw);
+ struct at91_pmc *pmc = periph->pmc;
+ int offset = AT91_PMC_PCSR;
+ u32 id = periph->id;
+
+ if (id < PERIPHERAL_ID_MIN)
+ return 1;
+ if (id > PERIPHERAL_ID_MAX)
+ offset = AT91_PMC_PCSR1;
+ return !!(pmc_read(pmc, offset) & PERIPHERAL_MASK(id));
+}
+
+static const struct clk_ops peripheral_ops = {
+ .enable = clk_peripheral_enable,
+ .disable = clk_peripheral_disable,
+ .is_enabled = clk_peripheral_is_enabled,
+};
+
+static struct clk * __init
+at91_clk_register_peripheral(struct at91_pmc *pmc, const char *name,
+ const char *parent_name, u32 id)
+{
+ struct clk_peripheral *periph;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!pmc || !name || !parent_name || id > PERIPHERAL_ID_MAX)
+ return ERR_PTR(-EINVAL);
+
+ periph = kzalloc(sizeof(*periph), GFP_KERNEL);
+ if (!periph)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &peripheral_ops;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+ init.flags = 0;
+
+ periph->id = id;
+ periph->hw.init = &init;
+ periph->pmc = pmc;
+
+ clk = clk_register(NULL, &periph->hw);
+ if (IS_ERR(clk))
+ kfree(periph);
+
+ return clk;
+}
+
+static void clk_sam9x5_peripheral_autodiv(struct clk_sam9x5_peripheral *periph)
+{
+ struct clk *parent;
+ unsigned long parent_rate;
+ int shift = 0;
+
+ if (!periph->auto_div)
+ return;
+
+ if (periph->range.max) {
+ parent = clk_get_parent_by_index(periph->hw.clk, 0);
+ parent_rate = __clk_get_rate(parent);
+ if (!parent_rate)
+ return;
+
+ for (; shift < PERIPHERAL_MAX_SHIFT; shift++) {
+ if (parent_rate >> shift <= periph->range.max)
+ break;
+ }
+ }
+
+ periph->auto_div = false;
+ periph->div = shift;
+}
+
+static int clk_sam9x5_peripheral_enable(struct clk_hw *hw)
+{
+ struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
+ struct at91_pmc *pmc = periph->pmc;
+
+ if (periph->id < PERIPHERAL_ID_MIN)
+ return 0;
+
+ pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID) |
+ AT91_PMC_PCR_CMD |
+ AT91_PMC_PCR_DIV(periph->div) |
+ AT91_PMC_PCR_EN);
+ return 0;
+}
+
+static void clk_sam9x5_peripheral_disable(struct clk_hw *hw)
+{
+ struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
+ struct at91_pmc *pmc = periph->pmc;
+
+ if (periph->id < PERIPHERAL_ID_MIN)
+ return;
+
+ pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID) |
+ AT91_PMC_PCR_CMD);
+}
+
+static int clk_sam9x5_peripheral_is_enabled(struct clk_hw *hw)
+{
+ struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
+ struct at91_pmc *pmc = periph->pmc;
+ int ret;
+
+ if (periph->id < PERIPHERAL_ID_MIN)
+ return 1;
+
+ pmc_lock(pmc);
+ pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID));
+ ret = !!(pmc_read(pmc, AT91_PMC_PCR) & AT91_PMC_PCR_EN);
+ pmc_unlock(pmc);
+
+ return ret;
+}
+
+static unsigned long
+clk_sam9x5_peripheral_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
+ struct at91_pmc *pmc = periph->pmc;
+ u32 tmp;
+
+ if (periph->id < PERIPHERAL_ID_MIN)
+ return parent_rate;
+
+ pmc_lock(pmc);
+ pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID));
+ tmp = pmc_read(pmc, AT91_PMC_PCR);
+ pmc_unlock(pmc);
+
+ if (tmp & AT91_PMC_PCR_EN) {
+ periph->div = PERIPHERAL_RSHIFT(tmp);
+ periph->auto_div = false;
+ } else {
+ clk_sam9x5_peripheral_autodiv(periph);
+ }
+
+ return parent_rate >> periph->div;
+}
+
+static long clk_sam9x5_peripheral_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *parent_rate)
+{
+ int shift = 0;
+ unsigned long best_rate;
+ unsigned long best_diff;
+ unsigned long cur_rate = *parent_rate;
+ unsigned long cur_diff;
+ struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
+
+ if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max)
+ return *parent_rate;
+
+ if (periph->range.max) {
+ for (; shift < PERIPHERAL_MAX_SHIFT; shift++) {
+ cur_rate = *parent_rate >> shift;
+ if (cur_rate <= periph->range.max)
+ break;
+ }
+ }
+
+ if (rate >= cur_rate)
+ return cur_rate;
+
+ best_diff = cur_rate - rate;
+ best_rate = cur_rate;
+ for (; shift < PERIPHERAL_MAX_SHIFT; shift++) {
+ cur_rate = *parent_rate >> shift;
+ if (cur_rate < rate)
+ cur_diff = rate - cur_rate;
+ else
+ cur_diff = cur_rate - rate;
+
+ if (cur_diff < best_diff) {
+ best_diff = cur_diff;
+ best_rate = cur_rate;
+ }
+
+ if (!best_diff || cur_rate < rate)
+ break;
+ }
+
+ return best_rate;
+}
+
+static int clk_sam9x5_peripheral_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ int shift;
+ struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
+ if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) {
+ if (parent_rate == rate)
+ return 0;
+ else
+ return -EINVAL;
+ }
+
+ if (periph->range.max && rate > periph->range.max)
+ return -EINVAL;
+
+ for (shift = 0; shift < PERIPHERAL_MAX_SHIFT; shift++) {
+ if (parent_rate >> shift == rate) {
+ periph->auto_div = false;
+ periph->div = shift;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static const struct clk_ops sam9x5_peripheral_ops = {
+ .enable = clk_sam9x5_peripheral_enable,
+ .disable = clk_sam9x5_peripheral_disable,
+ .is_enabled = clk_sam9x5_peripheral_is_enabled,
+ .recalc_rate = clk_sam9x5_peripheral_recalc_rate,
+ .round_rate = clk_sam9x5_peripheral_round_rate,
+ .set_rate = clk_sam9x5_peripheral_set_rate,
+};
+
+static struct clk * __init
+at91_clk_register_sam9x5_peripheral(struct at91_pmc *pmc, const char *name,
+ const char *parent_name, u32 id,
+ const struct clk_range *range)
+{
+ struct clk_sam9x5_peripheral *periph;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!pmc || !name || !parent_name)
+ return ERR_PTR(-EINVAL);
+
+ periph = kzalloc(sizeof(*periph), GFP_KERNEL);
+ if (!periph)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &sam9x5_peripheral_ops;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+ init.flags = 0;
+
+ periph->id = id;
+ periph->hw.init = &init;
+ periph->div = 0;
+ periph->pmc = pmc;
+ periph->auto_div = true;
+ periph->range = *range;
+
+ clk = clk_register(NULL, &periph->hw);
+ if (IS_ERR(clk))
+ kfree(periph);
+ else
+ clk_sam9x5_peripheral_autodiv(periph);
+
+ return clk;
+}
+
+static void __init
+of_at91_clk_periph_setup(struct device_node *np, struct at91_pmc *pmc, u8 type)
+{
+ int num;
+ u32 id;
+ struct clk *clk;
+ const char *parent_name;
+ const char *name;
+ struct device_node *periphclknp;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ if (!parent_name)
+ return;
+
+ num = of_get_child_count(np);
+ if (!num || num > PERIPHERAL_MAX)
+ return;
+
+ for_each_child_of_node(np, periphclknp) {
+ if (of_property_read_u32(periphclknp, "reg", &id))
+ continue;
+
+ if (id >= PERIPHERAL_MAX)
+ continue;
+
+ if (of_property_read_string(np, "clock-output-names", &name))
+ name = periphclknp->name;
+
+ if (type == PERIPHERAL_AT91RM9200) {
+ clk = at91_clk_register_peripheral(pmc, name,
+ parent_name, id);
+ } else {
+ struct clk_range range = CLK_RANGE(0, 0);
+
+ of_at91_get_clk_range(periphclknp,
+ "atmel,clk-output-range",
+ &range);
+
+ clk = at91_clk_register_sam9x5_peripheral(pmc, name,
+ parent_name,
+ id, &range);
+ }
+
+ if (IS_ERR(clk))
+ continue;
+
+ of_clk_add_provider(periphclknp, of_clk_src_simple_get, clk);
+ }
+}
+
+void __init of_at91rm9200_clk_periph_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_periph_setup(np, pmc, PERIPHERAL_AT91RM9200);
+}
+
+void __init of_at91sam9x5_clk_periph_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_periph_setup(np, pmc, PERIPHERAL_AT91SAM9X5);
+}
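
clk_sam9x5_peripheral_autodiv() above picks the smallest power-of-two divider that brings the parent rate inside the clock's atmel,clk-output-range. A stand-alone sketch of that selection, assuming a 133 MHz parent and a 66 MHz limit (both values are illustrative):

#include <stdio.h>

#define PERIPHERAL_MAX_SHIFT	4

int main(void)
{
	unsigned long parent_rate = 133000000;	/* assumed master clock */
	unsigned long range_max = 66000000;	/* assumed output-range maximum */
	int shift;

	for (shift = 0; shift < PERIPHERAL_MAX_SHIFT; shift++)
		if ((parent_rate >> shift) <= range_max)
			break;			/* 133 MHz >> 2 = 33.25 MHz <= 66 MHz */

	printf("divider shift = %d -> %lu Hz\n", shift, parent_rate >> shift);
	return 0;
}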
diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
new file mode 100644
index 00000000000..cf6ed023504
--- /dev/null
+++ b/drivers/clk/at91/clk-pll.c
@@ -0,0 +1,531 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include "pmc.h"
+
+#define PLL_STATUS_MASK(id) (1 << (1 + (id)))
+#define PLL_REG(id) (AT91_CKGR_PLLAR + ((id) * 4))
+#define PLL_DIV_MASK 0xff
+#define PLL_DIV_MAX PLL_DIV_MASK
+#define PLL_DIV(reg) ((reg) & PLL_DIV_MASK)
+#define PLL_MUL(reg, layout) (((reg) >> (layout)->mul_shift) & \
+ (layout)->mul_mask)
+#define PLL_ICPR_SHIFT(id) ((id) * 16)
+#define PLL_ICPR_MASK(id) (0xffff << PLL_ICPR_SHIFT(id))
+#define PLL_MAX_COUNT 0x3ff
+#define PLL_COUNT_SHIFT 8
+#define PLL_OUT_SHIFT 14
+#define PLL_MAX_ID 1
+
+struct clk_pll_characteristics {
+ struct clk_range input;
+ int num_output;
+ struct clk_range *output;
+ u16 *icpll;
+ u8 *out;
+};
+
+struct clk_pll_layout {
+ u32 pllr_mask;
+ u16 mul_mask;
+ u8 mul_shift;
+};
+
+#define to_clk_pll(hw) container_of(hw, struct clk_pll, hw)
+
+struct clk_pll {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ unsigned int irq;
+ wait_queue_head_t wait;
+ u8 id;
+ u8 div;
+ u8 range;
+ u16 mul;
+ const struct clk_pll_layout *layout;
+ const struct clk_pll_characteristics *characteristics;
+};
+
+static irqreturn_t clk_pll_irq_handler(int irq, void *dev_id)
+{
+ struct clk_pll *pll = (struct clk_pll *)dev_id;
+
+ wake_up(&pll->wait);
+ disable_irq_nosync(pll->irq);
+
+ return IRQ_HANDLED;
+}
+
+static int clk_pll_prepare(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ struct at91_pmc *pmc = pll->pmc;
+ const struct clk_pll_layout *layout = pll->layout;
+ const struct clk_pll_characteristics *characteristics =
+ pll->characteristics;
+ u8 id = pll->id;
+ u32 mask = PLL_STATUS_MASK(id);
+ int offset = PLL_REG(id);
+ u8 out = 0;
+ u32 pllr, icpr;
+ u8 div;
+ u16 mul;
+
+ pllr = pmc_read(pmc, offset);
+ div = PLL_DIV(pllr);
+ mul = PLL_MUL(pllr, layout);
+
+ if ((pmc_read(pmc, AT91_PMC_SR) & mask) &&
+ (div == pll->div && mul == pll->mul))
+ return 0;
+
+ if (characteristics->out)
+ out = characteristics->out[pll->range];
+ if (characteristics->icpll) {
+ icpr = pmc_read(pmc, AT91_PMC_PLLICPR) & ~PLL_ICPR_MASK(id);
+ icpr |= (characteristics->icpll[pll->range] <<
+ PLL_ICPR_SHIFT(id));
+ pmc_write(pmc, AT91_PMC_PLLICPR, icpr);
+ }
+
+ pllr &= ~layout->pllr_mask;
+ pllr |= layout->pllr_mask &
+ (pll->div | (PLL_MAX_COUNT << PLL_COUNT_SHIFT) |
+ (out << PLL_OUT_SHIFT) |
+ ((pll->mul & layout->mul_mask) << layout->mul_shift));
+ pmc_write(pmc, offset, pllr);
+
+ while (!(pmc_read(pmc, AT91_PMC_SR) & mask)) {
+ enable_irq(pll->irq);
+ wait_event(pll->wait,
+ pmc_read(pmc, AT91_PMC_SR) & mask);
+ }
+
+ return 0;
+}
+
+static int clk_pll_is_prepared(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ struct at91_pmc *pmc = pll->pmc;
+
+ return !!(pmc_read(pmc, AT91_PMC_SR) &
+ PLL_STATUS_MASK(pll->id));
+}
+
+static void clk_pll_unprepare(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ struct at91_pmc *pmc = pll->pmc;
+ const struct clk_pll_layout *layout = pll->layout;
+ int offset = PLL_REG(pll->id);
+ u32 tmp = pmc_read(pmc, offset) & ~(layout->pllr_mask);
+
+ pmc_write(pmc, offset, tmp);
+}
+
+static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ const struct clk_pll_layout *layout = pll->layout;
+ struct at91_pmc *pmc = pll->pmc;
+ int offset = PLL_REG(pll->id);
+ u32 tmp = pmc_read(pmc, offset) & layout->pllr_mask;
+ u8 div = PLL_DIV(tmp);
+ u16 mul = PLL_MUL(tmp, layout);
+ if (!div || !mul)
+ return 0;
+
+ return (parent_rate * (mul + 1)) / div;
+}
+
+static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate,
+ unsigned long parent_rate,
+ u32 *div, u32 *mul,
+				     u32 *index)
+{
+ unsigned long maxrate;
+ unsigned long minrate;
+ unsigned long divrate;
+ unsigned long bestdiv = 1;
+ unsigned long bestmul;
+ unsigned long tmpdiv;
+ unsigned long roundup;
+ unsigned long rounddown;
+ unsigned long remainder;
+ unsigned long bestremainder;
+ unsigned long maxmul;
+ unsigned long maxdiv;
+ unsigned long mindiv;
+ int i = 0;
+ const struct clk_pll_layout *layout = pll->layout;
+ const struct clk_pll_characteristics *characteristics =
+ pll->characteristics;
+
+ /* Minimum divider = 1 */
+ /* Maximum multiplier = max_mul */
+ maxmul = layout->mul_mask + 1;
+ maxrate = (parent_rate * maxmul) / 1;
+
+ /* Maximum divider = max_div */
+ /* Minimum multiplier = 2 */
+ maxdiv = PLL_DIV_MAX;
+ minrate = (parent_rate * 2) / maxdiv;
+
+	if (parent_rate < characteristics->input.min ||
+	    parent_rate > characteristics->input.max)
+		return -ERANGE;
+
+	if (rate < minrate || rate > maxrate)
+		return -ERANGE;
+
+ for (i = 0; i < characteristics->num_output; i++) {
+		if (rate >= characteristics->output[i].min &&
+		    rate <= characteristics->output[i].max)
+ break;
+ }
+
+ if (i >= characteristics->num_output)
+ return -ERANGE;
+
+ bestmul = rate / parent_rate;
+	rounddown = rate % parent_rate;
+	roundup = parent_rate - rounddown;
+ bestremainder = roundup < rounddown ? roundup : rounddown;
+
+ if (!bestremainder) {
+ if (div)
+ *div = bestdiv;
+ if (mul)
+ *mul = bestmul;
+ if (index)
+ *index = i;
+ return rate;
+ }
+
+	maxdiv = PLL_DIV_MAX / (bestmul + 1);
+ if (parent_rate / maxdiv < characteristics->input.min)
+ maxdiv = parent_rate / characteristics->input.min;
+ mindiv = parent_rate / characteristics->input.max;
+ if (parent_rate % characteristics->input.max)
+ mindiv++;
+
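+	/*
+	 * Scan the remaining dividers and keep the (div, mul) pair whose
+	 * resulting rate has the smallest remainder from the requested
+	 * rate, stopping early on an exact match.
+	 */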
+ for (tmpdiv = mindiv; tmpdiv < maxdiv; tmpdiv++) {
+ divrate = parent_rate / tmpdiv;
+
+ rounddown = rate % divrate;
+ roundup = divrate - rounddown;
+ remainder = roundup < rounddown ? roundup : rounddown;
+
+ if (remainder < bestremainder) {
+ bestremainder = remainder;
+ bestmul = rate / divrate;
+ bestdiv = tmpdiv;
+ }
+
+ if (!remainder)
+ break;
+ }
+
+ rate = (parent_rate / bestdiv) * bestmul;
+
+ if (div)
+ *div = bestdiv;
+ if (mul)
+ *mul = bestmul;
+ if (index)
+ *index = i;
+
+ return rate;
+}
+
+static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+
+ return clk_pll_get_best_div_mul(pll, rate, *parent_rate,
+ NULL, NULL, NULL);
+}
+
+static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ long ret;
+ u32 div;
+ u32 mul;
+ u32 index;
+
+ ret = clk_pll_get_best_div_mul(pll, rate, parent_rate,
+ &div, &mul, &index);
+ if (ret < 0)
+ return ret;
+
+ pll->range = index;
+ pll->div = div;
+ pll->mul = mul;
+
+ return 0;
+}
+
+static const struct clk_ops pll_ops = {
+ .prepare = clk_pll_prepare,
+ .unprepare = clk_pll_unprepare,
+ .is_prepared = clk_pll_is_prepared,
+ .recalc_rate = clk_pll_recalc_rate,
+ .round_rate = clk_pll_round_rate,
+ .set_rate = clk_pll_set_rate,
+};
+
+static struct clk * __init
+at91_clk_register_pll(struct at91_pmc *pmc, unsigned int irq, const char *name,
+ const char *parent_name, u8 id,
+ const struct clk_pll_layout *layout,
+ const struct clk_pll_characteristics *characteristics)
+{
+ struct clk_pll *pll;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+ int ret;
+ int offset = PLL_REG(id);
+ u32 tmp;
+
+ if (id > PLL_MAX_ID)
+ return ERR_PTR(-EINVAL);
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &pll_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = CLK_SET_RATE_GATE;
+
+ pll->id = id;
+ pll->hw.init = &init;
+ pll->layout = layout;
+ pll->characteristics = characteristics;
+ pll->pmc = pmc;
+ pll->irq = irq;
+ tmp = pmc_read(pmc, offset) & layout->pllr_mask;
+ pll->div = PLL_DIV(tmp);
+ pll->mul = PLL_MUL(tmp, layout);
+ init_waitqueue_head(&pll->wait);
+ irq_set_status_flags(pll->irq, IRQ_NOAUTOEN);
+ ret = request_irq(pll->irq, clk_pll_irq_handler, IRQF_TRIGGER_HIGH,
+ id ? "clk-pllb" : "clk-plla", pll);
+	if (ret) {
+		kfree(pll);
+		return ERR_PTR(ret);
+	}
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
+
+static const struct clk_pll_layout at91rm9200_pll_layout = {
+ .pllr_mask = 0x7FFFFFF,
+ .mul_shift = 16,
+ .mul_mask = 0x7FF,
+};
+
+static const struct clk_pll_layout at91sam9g45_pll_layout = {
+ .pllr_mask = 0xFFFFFF,
+ .mul_shift = 16,
+ .mul_mask = 0xFF,
+};
+
+static const struct clk_pll_layout at91sam9g20_pllb_layout = {
+ .pllr_mask = 0x3FFFFF,
+ .mul_shift = 16,
+ .mul_mask = 0x3F,
+};
+
+static const struct clk_pll_layout sama5d3_pll_layout = {
+ .pllr_mask = 0x1FFFFFF,
+ .mul_shift = 18,
+ .mul_mask = 0x7F,
+};
+
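+/*
+ * Each "atmel,pll-clk-output-ranges" entry holds
+ * #atmel,pll-clk-output-range-cells cells (2 to 4): <min max>, optionally
+ * followed by the OUT and then the ICPLL value to program for that range.
+ */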
+static struct clk_pll_characteristics * __init
+of_at91_clk_pll_get_characteristics(struct device_node *np)
+{
+ int i;
+ int offset;
+ u32 tmp;
+ int num_output;
+ u32 num_cells;
+ struct clk_range input;
+ struct clk_range *output;
+ u8 *out = NULL;
+ u16 *icpll = NULL;
+ struct clk_pll_characteristics *characteristics;
+
+ if (of_at91_get_clk_range(np, "atmel,clk-input-range", &input))
+ return NULL;
+
+ if (of_property_read_u32(np, "#atmel,pll-clk-output-range-cells",
+ &num_cells))
+ return NULL;
+
+ if (num_cells < 2 || num_cells > 4)
+ return NULL;
+
+ if (!of_get_property(np, "atmel,pll-clk-output-ranges", &tmp))
+ return NULL;
+ num_output = tmp / (sizeof(u32) * num_cells);
+
+ characteristics = kzalloc(sizeof(*characteristics), GFP_KERNEL);
+ if (!characteristics)
+ return NULL;
+
+ output = kzalloc(sizeof(*output) * num_output, GFP_KERNEL);
+ if (!output)
+ goto out_free_characteristics;
+
+ if (num_cells > 2) {
+ out = kzalloc(sizeof(*out) * num_output, GFP_KERNEL);
+ if (!out)
+ goto out_free_output;
+ }
+
+ if (num_cells > 3) {
+ icpll = kzalloc(sizeof(*icpll) * num_output, GFP_KERNEL);
+ if (!icpll)
+ goto out_free_output;
+ }
+
+ for (i = 0; i < num_output; i++) {
+ offset = i * num_cells;
+ if (of_property_read_u32_index(np,
+ "atmel,pll-clk-output-ranges",
+ offset, &tmp))
+ goto out_free_output;
+ output[i].min = tmp;
+ if (of_property_read_u32_index(np,
+ "atmel,pll-clk-output-ranges",
+ offset + 1, &tmp))
+ goto out_free_output;
+ output[i].max = tmp;
+
+ if (num_cells == 2)
+ continue;
+
+ if (of_property_read_u32_index(np,
+ "atmel,pll-clk-output-ranges",
+ offset + 2, &tmp))
+ goto out_free_output;
+ out[i] = tmp;
+
+ if (num_cells == 3)
+ continue;
+
+ if (of_property_read_u32_index(np,
+ "atmel,pll-clk-output-ranges",
+ offset + 3, &tmp))
+ goto out_free_output;
+ icpll[i] = tmp;
+ }
+
+ characteristics->input = input;
+ characteristics->num_output = num_output;
+ characteristics->output = output;
+ characteristics->out = out;
+ characteristics->icpll = icpll;
+ return characteristics;
+
+out_free_output:
+ kfree(icpll);
+ kfree(out);
+ kfree(output);
+out_free_characteristics:
+ kfree(characteristics);
+ return NULL;
+}
+
+static void __init
+of_at91_clk_pll_setup(struct device_node *np, struct at91_pmc *pmc,
+ const struct clk_pll_layout *layout)
+{
+ u32 id;
+ unsigned int irq;
+ struct clk *clk;
+ const char *parent_name;
+ const char *name = np->name;
+ struct clk_pll_characteristics *characteristics;
+
+ if (of_property_read_u32(np, "reg", &id))
+ return;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ characteristics = of_at91_clk_pll_get_characteristics(np);
+ if (!characteristics)
+ return;
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq)
+ return;
+
+ clk = at91_clk_register_pll(pmc, irq, name, parent_name, id, layout,
+ characteristics);
+ if (IS_ERR(clk))
+ goto out_free_characteristics;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ return;
+
+out_free_characteristics:
+ kfree(characteristics);
+}
+
+void __init of_at91rm9200_clk_pll_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_pll_setup(np, pmc, &at91rm9200_pll_layout);
+}
+
+void __init of_at91sam9g45_clk_pll_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_pll_setup(np, pmc, &at91sam9g45_pll_layout);
+}
+
+void __init of_at91sam9g20_clk_pllb_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_pll_setup(np, pmc, &at91sam9g20_pllb_layout);
+}
+
+void __init of_sama5d3_clk_pll_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_pll_setup(np, pmc, &sama5d3_pll_layout);
+}
diff --git a/drivers/clk/at91/clk-plldiv.c b/drivers/clk/at91/clk-plldiv.c
new file mode 100644
index 00000000000..ea226562bb4
--- /dev/null
+++ b/drivers/clk/at91/clk-plldiv.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+
+#include "pmc.h"
+
+#define to_clk_plldiv(hw) container_of(hw, struct clk_plldiv, hw)
+
+struct clk_plldiv {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+};
+
+static unsigned long clk_plldiv_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_plldiv *plldiv = to_clk_plldiv(hw);
+ struct at91_pmc *pmc = plldiv->pmc;
+
+ if (pmc_read(pmc, AT91_PMC_MCKR) & AT91_PMC_PLLADIV2)
+ return parent_rate / 2;
+
+ return parent_rate;
+}
+
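+/*
+ * The PLLADIV2 bit only allows dividing the PLLA output by 1 or 2, so round
+ * to whichever of parent_rate and parent_rate / 2 is closest to the
+ * requested rate.
+ */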
+static long clk_plldiv_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned long div;
+
+ if (rate > *parent_rate)
+ return *parent_rate;
+ div = *parent_rate / 2;
+ if (rate < div)
+ return div;
+
+ if (rate - div < *parent_rate - rate)
+ return div;
+
+ return *parent_rate;
+}
+
+static int clk_plldiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_plldiv *plldiv = to_clk_plldiv(hw);
+ struct at91_pmc *pmc = plldiv->pmc;
+ u32 tmp;
+
+ if (parent_rate != rate && (parent_rate / 2) != rate)
+ return -EINVAL;
+
+ pmc_lock(pmc);
+ tmp = pmc_read(pmc, AT91_PMC_MCKR) & ~AT91_PMC_PLLADIV2;
+ if ((parent_rate / 2) == rate)
+ tmp |= AT91_PMC_PLLADIV2;
+ pmc_write(pmc, AT91_PMC_MCKR, tmp);
+ pmc_unlock(pmc);
+
+ return 0;
+}
+
+static const struct clk_ops plldiv_ops = {
+ .recalc_rate = clk_plldiv_recalc_rate,
+ .round_rate = clk_plldiv_round_rate,
+ .set_rate = clk_plldiv_set_rate,
+};
+
+static struct clk * __init
+at91_clk_register_plldiv(struct at91_pmc *pmc, const char *name,
+ const char *parent_name)
+{
+ struct clk_plldiv *plldiv;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ plldiv = kzalloc(sizeof(*plldiv), GFP_KERNEL);
+ if (!plldiv)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &plldiv_ops;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+ init.flags = CLK_SET_RATE_GATE;
+
+ plldiv->hw.init = &init;
+ plldiv->pmc = pmc;
+
+ clk = clk_register(NULL, &plldiv->hw);
+
+ if (IS_ERR(clk))
+ kfree(plldiv);
+
+ return clk;
+}
+
+static void __init
+of_at91_clk_plldiv_setup(struct device_node *np, struct at91_pmc *pmc)
+{
+ struct clk *clk;
+ const char *parent_name;
+ const char *name = np->name;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ clk = at91_clk_register_plldiv(pmc, name, parent_name);
+
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+void __init of_at91sam9x5_clk_plldiv_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_plldiv_setup(np, pmc);
+}
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
new file mode 100644
index 00000000000..62e2509f9df
--- /dev/null
+++ b/drivers/clk/at91/clk-programmable.c
@@ -0,0 +1,286 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+
+#include "pmc.h"
+
+#define PROG_SOURCE_MAX 5
+#define PROG_ID_MAX 7
+
+#define PROG_STATUS_MASK(id) (1 << ((id) + 8))
+#define PROG_PRES_MASK 0x7
+#define PROG_MAX_RM9200_CSS 3
+
+struct clk_programmable_layout {
+ u8 pres_shift;
+ u8 css_mask;
+ u8 have_slck_mck;
+};
+
+struct clk_programmable {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ u8 id;
+ const struct clk_programmable_layout *layout;
+};
+
+#define to_clk_programmable(hw) container_of(hw, struct clk_programmable, hw)
+
+static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ u32 pres;
+ struct clk_programmable *prog = to_clk_programmable(hw);
+ struct at91_pmc *pmc = prog->pmc;
+ const struct clk_programmable_layout *layout = prog->layout;
+
+ pres = (pmc_read(pmc, AT91_PMC_PCKR(prog->id)) >> layout->pres_shift) &
+ PROG_PRES_MASK;
+ return parent_rate >> pres;
+}
+
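+/*
+ * Try every parent with every power-of-two prescaler and pick the
+ * combination that comes closest to the requested rate without exceeding it.
+ */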
+static long clk_programmable_determine_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *best_parent_rate,
+ struct clk **best_parent_clk)
+{
+ struct clk *parent = NULL;
+ long best_rate = -EINVAL;
+ unsigned long parent_rate;
+ unsigned long tmp_rate;
+ int shift;
+ int i;
+
+ for (i = 0; i < __clk_get_num_parents(hw->clk); i++) {
+ parent = clk_get_parent_by_index(hw->clk, i);
+ if (!parent)
+ continue;
+
+ parent_rate = __clk_get_rate(parent);
+ for (shift = 0; shift < PROG_PRES_MASK; shift++) {
+ tmp_rate = parent_rate >> shift;
+ if (tmp_rate <= rate)
+ break;
+ }
+
+ if (tmp_rate > rate)
+ continue;
+
+ if (best_rate < 0 || (rate - tmp_rate) < (rate - best_rate)) {
+ best_rate = tmp_rate;
+ *best_parent_rate = parent_rate;
+ *best_parent_clk = parent;
+ }
+
+ if (!best_rate)
+ break;
+ }
+
+ return best_rate;
+}
+
+static int clk_programmable_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_programmable *prog = to_clk_programmable(hw);
+ const struct clk_programmable_layout *layout = prog->layout;
+ struct at91_pmc *pmc = prog->pmc;
+ u32 tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id)) & ~layout->css_mask;
+
+	if (layout->have_slck_mck)
+		tmp &= ~AT91_PMC_CSSMCK_MCK;
+
+	if (index > layout->css_mask) {
+		if (index > PROG_MAX_RM9200_CSS && layout->have_slck_mck) {
+			tmp |= AT91_PMC_CSSMCK_MCK;
+			pmc_write(pmc, AT91_PMC_PCKR(prog->id), tmp);
+			return 0;
+		} else {
+			return -EINVAL;
+		}
+	}
+
+	pmc_write(pmc, AT91_PMC_PCKR(prog->id), tmp | index);
+	return 0;
+}
+
+static u8 clk_programmable_get_parent(struct clk_hw *hw)
+{
+ u32 tmp;
+ u8 ret;
+ struct clk_programmable *prog = to_clk_programmable(hw);
+ struct at91_pmc *pmc = prog->pmc;
+ const struct clk_programmable_layout *layout = prog->layout;
+
+ tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id));
+ ret = tmp & layout->css_mask;
+ if (layout->have_slck_mck && (tmp & AT91_PMC_CSSMCK_MCK) && !ret)
+ ret = PROG_MAX_RM9200_CSS + 1;
+
+ return ret;
+}
+
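+/*
+ * The programmable clock prescaler only divides by powers of two, so reject
+ * ratios that are not an exact power of two.
+ */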
+static int clk_programmable_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_programmable *prog = to_clk_programmable(hw);
+ struct at91_pmc *pmc = prog->pmc;
+ const struct clk_programmable_layout *layout = prog->layout;
+ unsigned long div = parent_rate / rate;
+ int shift = 0;
+ u32 tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id)) &
+ ~(PROG_PRES_MASK << layout->pres_shift);
+
+ if (!div)
+ return -EINVAL;
+
+ shift = fls(div) - 1;
+
+	if (div != (1 << shift))
+ return -EINVAL;
+
+ if (shift >= PROG_PRES_MASK)
+ return -EINVAL;
+
+ pmc_write(pmc, AT91_PMC_PCKR(prog->id),
+ tmp | (shift << layout->pres_shift));
+
+ return 0;
+}
+
+static const struct clk_ops programmable_ops = {
+ .recalc_rate = clk_programmable_recalc_rate,
+ .determine_rate = clk_programmable_determine_rate,
+ .get_parent = clk_programmable_get_parent,
+ .set_parent = clk_programmable_set_parent,
+ .set_rate = clk_programmable_set_rate,
+};
+
+static struct clk * __init
+at91_clk_register_programmable(struct at91_pmc *pmc,
+ const char *name, const char **parent_names,
+ u8 num_parents, u8 id,
+ const struct clk_programmable_layout *layout)
+{
+ struct clk_programmable *prog;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (id > PROG_ID_MAX)
+ return ERR_PTR(-EINVAL);
+
+ prog = kzalloc(sizeof(*prog), GFP_KERNEL);
+ if (!prog)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &programmable_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+
+ prog->id = id;
+ prog->layout = layout;
+ prog->hw.init = &init;
+ prog->pmc = pmc;
+
+ clk = clk_register(NULL, &prog->hw);
+ if (IS_ERR(clk))
+ kfree(prog);
+
+ return clk;
+}
+
+static const struct clk_programmable_layout at91rm9200_programmable_layout = {
+ .pres_shift = 2,
+ .css_mask = 0x3,
+ .have_slck_mck = 0,
+};
+
+static const struct clk_programmable_layout at91sam9g45_programmable_layout = {
+ .pres_shift = 2,
+ .css_mask = 0x3,
+ .have_slck_mck = 1,
+};
+
+static const struct clk_programmable_layout at91sam9x5_programmable_layout = {
+ .pres_shift = 4,
+ .css_mask = 0x7,
+ .have_slck_mck = 0,
+};
+
+static void __init
+of_at91_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc,
+ const struct clk_programmable_layout *layout)
+{
+ int num;
+ u32 id;
+ int i;
+ struct clk *clk;
+ int num_parents;
+ const char *parent_names[PROG_SOURCE_MAX];
+ const char *name;
+ struct device_node *progclknp;
+
+ num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+ if (num_parents <= 0 || num_parents > PROG_SOURCE_MAX)
+ return;
+
+ for (i = 0; i < num_parents; ++i) {
+ parent_names[i] = of_clk_get_parent_name(np, i);
+ if (!parent_names[i])
+ return;
+ }
+
+ num = of_get_child_count(np);
+ if (!num || num > (PROG_ID_MAX + 1))
+ return;
+
+ for_each_child_of_node(np, progclknp) {
+ if (of_property_read_u32(progclknp, "reg", &id))
+ continue;
+
+ if (of_property_read_string(np, "clock-output-names", &name))
+ name = progclknp->name;
+
+ clk = at91_clk_register_programmable(pmc, name,
+ parent_names, num_parents,
+ id, layout);
+ if (IS_ERR(clk))
+ continue;
+
+ of_clk_add_provider(progclknp, of_clk_src_simple_get, clk);
+ }
+}
+
+void __init of_at91rm9200_clk_prog_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_prog_setup(np, pmc, &at91rm9200_programmable_layout);
+}
+
+void __init of_at91sam9g45_clk_prog_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_prog_setup(np, pmc, &at91sam9g45_programmable_layout);
+}
+
+void __init of_at91sam9x5_clk_prog_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_prog_setup(np, pmc, &at91sam9x5_programmable_layout);
+}
diff --git a/drivers/clk/at91/clk-slow.c b/drivers/clk/at91/clk-slow.c
new file mode 100644
index 00000000000..0300c46ee24
--- /dev/null
+++ b/drivers/clk/at91/clk-slow.c
@@ -0,0 +1,467 @@
+/*
+ * drivers/clk/at91/clk-slow.c
+ *
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#include "pmc.h"
+#include "sckc.h"
+
+#define SLOW_CLOCK_FREQ 32768
+#define SLOWCK_SW_CYCLES 5
+#define SLOWCK_SW_TIME_USEC ((SLOWCK_SW_CYCLES * USEC_PER_SEC) / \
+ SLOW_CLOCK_FREQ)
+
+#define AT91_SCKC_CR 0x00
+#define AT91_SCKC_RCEN (1 << 0)
+#define AT91_SCKC_OSC32EN (1 << 1)
+#define AT91_SCKC_OSC32BYP (1 << 2)
+#define AT91_SCKC_OSCSEL (1 << 3)
+
+struct clk_slow_osc {
+ struct clk_hw hw;
+ void __iomem *sckcr;
+ unsigned long startup_usec;
+};
+
+#define to_clk_slow_osc(hw) container_of(hw, struct clk_slow_osc, hw)
+
+struct clk_slow_rc_osc {
+ struct clk_hw hw;
+ void __iomem *sckcr;
+ unsigned long frequency;
+ unsigned long accuracy;
+ unsigned long startup_usec;
+};
+
+#define to_clk_slow_rc_osc(hw) container_of(hw, struct clk_slow_rc_osc, hw)
+
+struct clk_sam9260_slow {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+};
+
+#define to_clk_sam9260_slow(hw) container_of(hw, struct clk_sam9260_slow, hw)
+
+struct clk_sam9x5_slow {
+ struct clk_hw hw;
+ void __iomem *sckcr;
+ u8 parent;
+};
+
+#define to_clk_sam9x5_slow(hw) container_of(hw, struct clk_sam9x5_slow, hw)
+
+static int clk_slow_osc_prepare(struct clk_hw *hw)
+{
+ struct clk_slow_osc *osc = to_clk_slow_osc(hw);
+ void __iomem *sckcr = osc->sckcr;
+ u32 tmp = readl(sckcr);
+
+ if (tmp & AT91_SCKC_OSC32BYP)
+ return 0;
+
+ writel(tmp | AT91_SCKC_OSC32EN, sckcr);
+
+ usleep_range(osc->startup_usec, osc->startup_usec + 1);
+
+ return 0;
+}
+
+static void clk_slow_osc_unprepare(struct clk_hw *hw)
+{
+ struct clk_slow_osc *osc = to_clk_slow_osc(hw);
+ void __iomem *sckcr = osc->sckcr;
+ u32 tmp = readl(sckcr);
+
+ if (tmp & AT91_SCKC_OSC32BYP)
+ return;
+
+ writel(tmp & ~AT91_SCKC_OSC32EN, sckcr);
+}
+
+static int clk_slow_osc_is_prepared(struct clk_hw *hw)
+{
+ struct clk_slow_osc *osc = to_clk_slow_osc(hw);
+ void __iomem *sckcr = osc->sckcr;
+ u32 tmp = readl(sckcr);
+
+ if (tmp & AT91_SCKC_OSC32BYP)
+ return 1;
+
+ return !!(tmp & AT91_SCKC_OSC32EN);
+}
+
+static const struct clk_ops slow_osc_ops = {
+ .prepare = clk_slow_osc_prepare,
+ .unprepare = clk_slow_osc_unprepare,
+ .is_prepared = clk_slow_osc_is_prepared,
+};
+
+static struct clk * __init
+at91_clk_register_slow_osc(void __iomem *sckcr,
+ const char *name,
+ const char *parent_name,
+ unsigned long startup,
+ bool bypass)
+{
+ struct clk_slow_osc *osc;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!sckcr || !name || !parent_name)
+ return ERR_PTR(-EINVAL);
+
+ osc = kzalloc(sizeof(*osc), GFP_KERNEL);
+ if (!osc)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &slow_osc_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = CLK_IGNORE_UNUSED;
+
+ osc->hw.init = &init;
+ osc->sckcr = sckcr;
+ osc->startup_usec = startup;
+
+ if (bypass)
+ writel((readl(sckcr) & ~AT91_SCKC_OSC32EN) | AT91_SCKC_OSC32BYP,
+ sckcr);
+
+ clk = clk_register(NULL, &osc->hw);
+ if (IS_ERR(clk))
+ kfree(osc);
+
+ return clk;
+}
+
+void __init of_at91sam9x5_clk_slow_osc_setup(struct device_node *np,
+ void __iomem *sckcr)
+{
+ struct clk *clk;
+ const char *parent_name;
+ const char *name = np->name;
+	u32 startup = 0;
+ bool bypass;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ of_property_read_string(np, "clock-output-names", &name);
+ of_property_read_u32(np, "atmel,startup-time-usec", &startup);
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+
+ clk = at91_clk_register_slow_osc(sckcr, name, parent_name, startup,
+ bypass);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+static unsigned long clk_slow_rc_osc_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw);
+
+ return osc->frequency;
+}
+
+static unsigned long clk_slow_rc_osc_recalc_accuracy(struct clk_hw *hw,
+ unsigned long parent_acc)
+{
+ struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw);
+
+ return osc->accuracy;
+}
+
+static int clk_slow_rc_osc_prepare(struct clk_hw *hw)
+{
+ struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw);
+ void __iomem *sckcr = osc->sckcr;
+
+ writel(readl(sckcr) | AT91_SCKC_RCEN, sckcr);
+
+ usleep_range(osc->startup_usec, osc->startup_usec + 1);
+
+ return 0;
+}
+
+static void clk_slow_rc_osc_unprepare(struct clk_hw *hw)
+{
+ struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw);
+ void __iomem *sckcr = osc->sckcr;
+
+ writel(readl(sckcr) & ~AT91_SCKC_RCEN, sckcr);
+}
+
+static int clk_slow_rc_osc_is_prepared(struct clk_hw *hw)
+{
+ struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw);
+
+ return !!(readl(osc->sckcr) & AT91_SCKC_RCEN);
+}
+
+static const struct clk_ops slow_rc_osc_ops = {
+ .prepare = clk_slow_rc_osc_prepare,
+ .unprepare = clk_slow_rc_osc_unprepare,
+ .is_prepared = clk_slow_rc_osc_is_prepared,
+ .recalc_rate = clk_slow_rc_osc_recalc_rate,
+ .recalc_accuracy = clk_slow_rc_osc_recalc_accuracy,
+};
+
+static struct clk * __init
+at91_clk_register_slow_rc_osc(void __iomem *sckcr,
+ const char *name,
+ unsigned long frequency,
+ unsigned long accuracy,
+ unsigned long startup)
+{
+ struct clk_slow_rc_osc *osc;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!sckcr || !name)
+ return ERR_PTR(-EINVAL);
+
+ osc = kzalloc(sizeof(*osc), GFP_KERNEL);
+ if (!osc)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &slow_rc_osc_ops;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ init.flags = CLK_IS_ROOT | CLK_IGNORE_UNUSED;
+
+ osc->hw.init = &init;
+ osc->sckcr = sckcr;
+ osc->frequency = frequency;
+ osc->accuracy = accuracy;
+ osc->startup_usec = startup;
+
+ clk = clk_register(NULL, &osc->hw);
+ if (IS_ERR(clk))
+ kfree(osc);
+
+ return clk;
+}
+
+void __init of_at91sam9x5_clk_slow_rc_osc_setup(struct device_node *np,
+ void __iomem *sckcr)
+{
+ struct clk *clk;
+ u32 frequency = 0;
+ u32 accuracy = 0;
+ u32 startup = 0;
+ const char *name = np->name;
+
+ of_property_read_string(np, "clock-output-names", &name);
+ of_property_read_u32(np, "clock-frequency", &frequency);
+ of_property_read_u32(np, "clock-accuracy", &accuracy);
+ of_property_read_u32(np, "atmel,startup-time-usec", &startup);
+
+ clk = at91_clk_register_slow_rc_osc(sckcr, name, frequency, accuracy,
+ startup);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+static int clk_sam9x5_slow_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_sam9x5_slow *slowck = to_clk_sam9x5_slow(hw);
+ void __iomem *sckcr = slowck->sckcr;
+ u32 tmp;
+
+ if (index > 1)
+ return -EINVAL;
+
+ tmp = readl(sckcr);
+
+ if ((!index && !(tmp & AT91_SCKC_OSCSEL)) ||
+ (index && (tmp & AT91_SCKC_OSCSEL)))
+ return 0;
+
+ if (index)
+ tmp |= AT91_SCKC_OSCSEL;
+ else
+ tmp &= ~AT91_SCKC_OSCSEL;
+
+ writel(tmp, sckcr);
+
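+	/*
+	 * Allow SLOWCK_SW_CYCLES (5) slow-clock cycles, roughly 153 us at
+	 * 32768 Hz, for the oscillator switchover to complete.
+	 */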
+ usleep_range(SLOWCK_SW_TIME_USEC, SLOWCK_SW_TIME_USEC + 1);
+
+ return 0;
+}
+
+static u8 clk_sam9x5_slow_get_parent(struct clk_hw *hw)
+{
+ struct clk_sam9x5_slow *slowck = to_clk_sam9x5_slow(hw);
+
+ return !!(readl(slowck->sckcr) & AT91_SCKC_OSCSEL);
+}
+
+static const struct clk_ops sam9x5_slow_ops = {
+ .set_parent = clk_sam9x5_slow_set_parent,
+ .get_parent = clk_sam9x5_slow_get_parent,
+};
+
+static struct clk * __init
+at91_clk_register_sam9x5_slow(void __iomem *sckcr,
+ const char *name,
+ const char **parent_names,
+ int num_parents)
+{
+ struct clk_sam9x5_slow *slowck;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!sckcr || !name || !parent_names || !num_parents)
+ return ERR_PTR(-EINVAL);
+
+ slowck = kzalloc(sizeof(*slowck), GFP_KERNEL);
+ if (!slowck)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &sam9x5_slow_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ init.flags = 0;
+
+ slowck->hw.init = &init;
+ slowck->sckcr = sckcr;
+ slowck->parent = !!(readl(sckcr) & AT91_SCKC_OSCSEL);
+
+ clk = clk_register(NULL, &slowck->hw);
+ if (IS_ERR(clk))
+ kfree(slowck);
+
+ return clk;
+}
+
+void __init of_at91sam9x5_clk_slow_setup(struct device_node *np,
+ void __iomem *sckcr)
+{
+ struct clk *clk;
+ const char *parent_names[2];
+ int num_parents;
+ const char *name = np->name;
+ int i;
+
+ num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+ if (num_parents <= 0 || num_parents > 2)
+ return;
+
+ for (i = 0; i < num_parents; ++i) {
+ parent_names[i] = of_clk_get_parent_name(np, i);
+ if (!parent_names[i])
+ return;
+ }
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ clk = at91_clk_register_sam9x5_slow(sckcr, name, parent_names,
+ num_parents);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+static u8 clk_sam9260_slow_get_parent(struct clk_hw *hw)
+{
+ struct clk_sam9260_slow *slowck = to_clk_sam9260_slow(hw);
+
+ return !!(pmc_read(slowck->pmc, AT91_PMC_SR) & AT91_PMC_OSCSEL);
+}
+
+static const struct clk_ops sam9260_slow_ops = {
+ .get_parent = clk_sam9260_slow_get_parent,
+};
+
+static struct clk * __init
+at91_clk_register_sam9260_slow(struct at91_pmc *pmc,
+ const char *name,
+ const char **parent_names,
+ int num_parents)
+{
+ struct clk_sam9260_slow *slowck;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!pmc || !name)
+ return ERR_PTR(-EINVAL);
+
+ if (!parent_names || !num_parents)
+ return ERR_PTR(-EINVAL);
+
+ slowck = kzalloc(sizeof(*slowck), GFP_KERNEL);
+ if (!slowck)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &sam9260_slow_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ init.flags = 0;
+
+ slowck->hw.init = &init;
+ slowck->pmc = pmc;
+
+ clk = clk_register(NULL, &slowck->hw);
+ if (IS_ERR(clk))
+ kfree(slowck);
+
+ return clk;
+}
+
+void __init of_at91sam9260_clk_slow_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ struct clk *clk;
+ const char *parent_names[2];
+ int num_parents;
+ const char *name = np->name;
+ int i;
+
+ num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+	if (num_parents != 2)
+ return;
+
+ for (i = 0; i < num_parents; ++i) {
+ parent_names[i] = of_clk_get_parent_name(np, i);
+ if (!parent_names[i])
+ return;
+ }
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ clk = at91_clk_register_sam9260_slow(pmc, name, parent_names,
+ num_parents);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
diff --git a/drivers/clk/at91/clk-smd.c b/drivers/clk/at91/clk-smd.c
new file mode 100644
index 00000000000..144d47ecfe6
--- /dev/null
+++ b/drivers/clk/at91/clk-smd.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+
+#include "pmc.h"
+
+#define SMD_SOURCE_MAX 2
+
+#define SMD_DIV_SHIFT 8
+#define SMD_MAX_DIV 0xf
+
+struct at91sam9x5_clk_smd {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+};
+
+#define to_at91sam9x5_clk_smd(hw) \
+ container_of(hw, struct at91sam9x5_clk_smd, hw)
+
+static unsigned long at91sam9x5_clk_smd_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ u32 tmp;
+ u8 smddiv;
+ struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw);
+ struct at91_pmc *pmc = smd->pmc;
+
+ tmp = pmc_read(pmc, AT91_PMC_SMD);
+ smddiv = (tmp & AT91_PMC_SMD_DIV) >> SMD_DIV_SHIFT;
+ return parent_rate / (smddiv + 1);
+}
+
+static long at91sam9x5_clk_smd_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned long div;
+ unsigned long bestrate;
+ unsigned long tmp;
+
+ if (rate >= *parent_rate)
+ return *parent_rate;
+
+ div = *parent_rate / rate;
+ if (div > SMD_MAX_DIV)
+ return *parent_rate / (SMD_MAX_DIV + 1);
+
+ bestrate = *parent_rate / div;
+ tmp = *parent_rate / (div + 1);
+ if (bestrate - rate > rate - tmp)
+ bestrate = tmp;
+
+ return bestrate;
+}
+
+static int at91sam9x5_clk_smd_set_parent(struct clk_hw *hw, u8 index)
+{
+ u32 tmp;
+ struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw);
+ struct at91_pmc *pmc = smd->pmc;
+
+ if (index > 1)
+ return -EINVAL;
+ tmp = pmc_read(pmc, AT91_PMC_SMD) & ~AT91_PMC_SMDS;
+ if (index)
+ tmp |= AT91_PMC_SMDS;
+ pmc_write(pmc, AT91_PMC_SMD, tmp);
+ return 0;
+}
+
+static u8 at91sam9x5_clk_smd_get_parent(struct clk_hw *hw)
+{
+ struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw);
+ struct at91_pmc *pmc = smd->pmc;
+
+ return pmc_read(pmc, AT91_PMC_SMD) & AT91_PMC_SMDS;
+}
+
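+/*
+ * The SMD divider field stores (div - 1), so only exact integer divisions
+ * between 1 and SMD_MAX_DIV + 1 can be programmed.
+ */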
+static int at91sam9x5_clk_smd_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ u32 tmp;
+ struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw);
+ struct at91_pmc *pmc = smd->pmc;
+ unsigned long div = parent_rate / rate;
+
+ if (parent_rate % rate || div < 1 || div > (SMD_MAX_DIV + 1))
+ return -EINVAL;
+ tmp = pmc_read(pmc, AT91_PMC_SMD) & ~AT91_PMC_SMD_DIV;
+ tmp |= (div - 1) << SMD_DIV_SHIFT;
+ pmc_write(pmc, AT91_PMC_SMD, tmp);
+
+ return 0;
+}
+
+static const struct clk_ops at91sam9x5_smd_ops = {
+ .recalc_rate = at91sam9x5_clk_smd_recalc_rate,
+ .round_rate = at91sam9x5_clk_smd_round_rate,
+ .get_parent = at91sam9x5_clk_smd_get_parent,
+ .set_parent = at91sam9x5_clk_smd_set_parent,
+ .set_rate = at91sam9x5_clk_smd_set_rate,
+};
+
+static struct clk * __init
+at91sam9x5_clk_register_smd(struct at91_pmc *pmc, const char *name,
+ const char **parent_names, u8 num_parents)
+{
+ struct at91sam9x5_clk_smd *smd;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ smd = kzalloc(sizeof(*smd), GFP_KERNEL);
+ if (!smd)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &at91sam9x5_smd_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+
+ smd->hw.init = &init;
+ smd->pmc = pmc;
+
+ clk = clk_register(NULL, &smd->hw);
+ if (IS_ERR(clk))
+ kfree(smd);
+
+ return clk;
+}
+
+void __init of_at91sam9x5_clk_smd_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ struct clk *clk;
+ int i;
+ int num_parents;
+ const char *parent_names[SMD_SOURCE_MAX];
+ const char *name = np->name;
+
+ num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+ if (num_parents <= 0 || num_parents > SMD_SOURCE_MAX)
+ return;
+
+ for (i = 0; i < num_parents; i++) {
+ parent_names[i] = of_clk_get_parent_name(np, i);
+ if (!parent_names[i])
+ return;
+ }
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ clk = at91sam9x5_clk_register_smd(pmc, name, parent_names,
+ num_parents);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
diff --git a/drivers/clk/at91/clk-system.c b/drivers/clk/at91/clk-system.c
new file mode 100644
index 00000000000..8c96307d736
--- /dev/null
+++ b/drivers/clk/at91/clk-system.c
@@ -0,0 +1,189 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+
+#include "pmc.h"
+
+#define SYSTEM_MAX_ID 31
+
+#define SYSTEM_MAX_NAME_SZ 32
+
+#define to_clk_system(hw) container_of(hw, struct clk_system, hw)
+struct clk_system {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ unsigned int irq;
+ wait_queue_head_t wait;
+ u8 id;
+};
+
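+/*
+ * System clock IDs 8 to 15 are the programmable clock outputs; they are the
+ * only system clocks with a ready flag in PMC_SR that must be waited for
+ * after being enabled.
+ */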
+static inline int is_pck(int id)
+{
+ return (id >= 8) && (id <= 15);
+}
+
+static irqreturn_t clk_system_irq_handler(int irq, void *dev_id)
+{
+ struct clk_system *sys = (struct clk_system *)dev_id;
+
+ wake_up(&sys->wait);
+ disable_irq_nosync(sys->irq);
+
+ return IRQ_HANDLED;
+}
+
+static int clk_system_prepare(struct clk_hw *hw)
+{
+ struct clk_system *sys = to_clk_system(hw);
+ struct at91_pmc *pmc = sys->pmc;
+ u32 mask = 1 << sys->id;
+
+ pmc_write(pmc, AT91_PMC_SCER, mask);
+
+ if (!is_pck(sys->id))
+ return 0;
+
+ while (!(pmc_read(pmc, AT91_PMC_SR) & mask)) {
+ if (sys->irq) {
+ enable_irq(sys->irq);
+ wait_event(sys->wait,
+ pmc_read(pmc, AT91_PMC_SR) & mask);
+		} else {
+			cpu_relax();
+		}
+ }
+ return 0;
+}
+
+static void clk_system_unprepare(struct clk_hw *hw)
+{
+ struct clk_system *sys = to_clk_system(hw);
+ struct at91_pmc *pmc = sys->pmc;
+
+ pmc_write(pmc, AT91_PMC_SCDR, 1 << sys->id);
+}
+
+static int clk_system_is_prepared(struct clk_hw *hw)
+{
+ struct clk_system *sys = to_clk_system(hw);
+ struct at91_pmc *pmc = sys->pmc;
+
+ if (!(pmc_read(pmc, AT91_PMC_SCSR) & (1 << sys->id)))
+ return 0;
+
+ if (!is_pck(sys->id))
+ return 1;
+
+ return !!(pmc_read(pmc, AT91_PMC_SR) & (1 << sys->id));
+}
+
+static const struct clk_ops system_ops = {
+ .prepare = clk_system_prepare,
+ .unprepare = clk_system_unprepare,
+ .is_prepared = clk_system_is_prepared,
+};
+
+static struct clk * __init
+at91_clk_register_system(struct at91_pmc *pmc, const char *name,
+ const char *parent_name, u8 id, int irq)
+{
+ struct clk_system *sys;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+ int ret;
+
+ if (!parent_name || id > SYSTEM_MAX_ID)
+ return ERR_PTR(-EINVAL);
+
+ sys = kzalloc(sizeof(*sys), GFP_KERNEL);
+ if (!sys)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &system_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ /*
+	 * CLK_IGNORE_UNUSED is used to keep ddrck from being switched off.
+	 * TODO: implement a driver for the at91 ddr controller (see
+	 * drivers/memory) that requests and enables the ddrck clock. Once
+	 * that is done, the CLK_IGNORE_UNUSED flag can be removed.
+ */
+ init.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED;
+
+ sys->id = id;
+ sys->hw.init = &init;
+ sys->pmc = pmc;
+ sys->irq = irq;
+ if (irq) {
+ init_waitqueue_head(&sys->wait);
+ irq_set_status_flags(sys->irq, IRQ_NOAUTOEN);
+ ret = request_irq(sys->irq, clk_system_irq_handler,
+ IRQF_TRIGGER_HIGH, name, sys);
+		if (ret) {
+			kfree(sys);
+			return ERR_PTR(ret);
+		}
+ }
+
+ clk = clk_register(NULL, &sys->hw);
+ if (IS_ERR(clk))
+ kfree(sys);
+
+ return clk;
+}
+
+static void __init
+of_at91_clk_sys_setup(struct device_node *np, struct at91_pmc *pmc)
+{
+ int num;
+ int irq = 0;
+ u32 id;
+ struct clk *clk;
+ const char *name;
+ struct device_node *sysclknp;
+ const char *parent_name;
+
+ num = of_get_child_count(np);
+ if (num > (SYSTEM_MAX_ID + 1))
+ return;
+
+ for_each_child_of_node(np, sysclknp) {
+ if (of_property_read_u32(sysclknp, "reg", &id))
+ continue;
+
+ if (of_property_read_string(np, "clock-output-names", &name))
+ name = sysclknp->name;
+
+ if (is_pck(id))
+ irq = irq_of_parse_and_map(sysclknp, 0);
+
+ parent_name = of_clk_get_parent_name(sysclknp, 0);
+
+ clk = at91_clk_register_system(pmc, name, parent_name, id, irq);
+ if (IS_ERR(clk))
+ continue;
+
+ of_clk_add_provider(sysclknp, of_clk_src_simple_get, clk);
+ }
+}
+
+void __init of_at91rm9200_clk_sys_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_sys_setup(np, pmc);
+}
diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
new file mode 100644
index 00000000000..7d1d26a4bd0
--- /dev/null
+++ b/drivers/clk/at91/clk-usb.c
@@ -0,0 +1,398 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+
+#include "pmc.h"
+
+#define USB_SOURCE_MAX 2
+
+#define SAM9X5_USB_DIV_SHIFT 8
+#define SAM9X5_USB_MAX_DIV 0xf
+
+#define RM9200_USB_DIV_SHIFT 28
+#define RM9200_USB_DIV_TAB_SIZE 4
+
+struct at91sam9x5_clk_usb {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+};
+
+#define to_at91sam9x5_clk_usb(hw) \
+ container_of(hw, struct at91sam9x5_clk_usb, hw)
+
+struct at91rm9200_clk_usb {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
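+	/* divisors[i] is selected by USBDIV == i; 0 marks an unused entry */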
+ u32 divisors[4];
+};
+
+#define to_at91rm9200_clk_usb(hw) \
+ container_of(hw, struct at91rm9200_clk_usb, hw)
+
+static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ u32 tmp;
+ u8 usbdiv;
+ struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
+ struct at91_pmc *pmc = usb->pmc;
+
+ tmp = pmc_read(pmc, AT91_PMC_USB);
+ usbdiv = (tmp & AT91_PMC_OHCIUSBDIV) >> SAM9X5_USB_DIV_SHIFT;
+ return parent_rate / (usbdiv + 1);
+}
+
+static long at91sam9x5_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned long div;
+ unsigned long bestrate;
+ unsigned long tmp;
+
+ if (rate >= *parent_rate)
+ return *parent_rate;
+
+ div = *parent_rate / rate;
+ if (div >= SAM9X5_USB_MAX_DIV)
+ return *parent_rate / (SAM9X5_USB_MAX_DIV + 1);
+
+ bestrate = *parent_rate / div;
+ tmp = *parent_rate / (div + 1);
+ if (bestrate - rate > rate - tmp)
+ bestrate = tmp;
+
+ return bestrate;
+}
+
+static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
+{
+ u32 tmp;
+ struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
+ struct at91_pmc *pmc = usb->pmc;
+
+ if (index > 1)
+ return -EINVAL;
+ tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_USBS;
+ if (index)
+ tmp |= AT91_PMC_USBS;
+ pmc_write(pmc, AT91_PMC_USB, tmp);
+ return 0;
+}
+
+static u8 at91sam9x5_clk_usb_get_parent(struct clk_hw *hw)
+{
+ struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
+ struct at91_pmc *pmc = usb->pmc;
+
+ return pmc_read(pmc, AT91_PMC_USB) & AT91_PMC_USBS;
+}
+
+static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ u32 tmp;
+ struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
+ struct at91_pmc *pmc = usb->pmc;
+ unsigned long div = parent_rate / rate;
+
+	if (parent_rate % rate || div < 1 || div > (SAM9X5_USB_MAX_DIV + 1))
+ return -EINVAL;
+
+ tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_OHCIUSBDIV;
+ tmp |= (div - 1) << SAM9X5_USB_DIV_SHIFT;
+ pmc_write(pmc, AT91_PMC_USB, tmp);
+
+ return 0;
+}
+
+static const struct clk_ops at91sam9x5_usb_ops = {
+ .recalc_rate = at91sam9x5_clk_usb_recalc_rate,
+ .round_rate = at91sam9x5_clk_usb_round_rate,
+ .get_parent = at91sam9x5_clk_usb_get_parent,
+ .set_parent = at91sam9x5_clk_usb_set_parent,
+ .set_rate = at91sam9x5_clk_usb_set_rate,
+};
+
+static int at91sam9n12_clk_usb_enable(struct clk_hw *hw)
+{
+ struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
+ struct at91_pmc *pmc = usb->pmc;
+
+ pmc_write(pmc, AT91_PMC_USB,
+ pmc_read(pmc, AT91_PMC_USB) | AT91_PMC_USBS);
+ return 0;
+}
+
+static void at91sam9n12_clk_usb_disable(struct clk_hw *hw)
+{
+ struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
+ struct at91_pmc *pmc = usb->pmc;
+
+ pmc_write(pmc, AT91_PMC_USB,
+ pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_USBS);
+}
+
+static int at91sam9n12_clk_usb_is_enabled(struct clk_hw *hw)
+{
+ struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
+ struct at91_pmc *pmc = usb->pmc;
+
+ return !!(pmc_read(pmc, AT91_PMC_USB) & AT91_PMC_USBS);
+}
+
+static const struct clk_ops at91sam9n12_usb_ops = {
+ .enable = at91sam9n12_clk_usb_enable,
+ .disable = at91sam9n12_clk_usb_disable,
+ .is_enabled = at91sam9n12_clk_usb_is_enabled,
+ .recalc_rate = at91sam9x5_clk_usb_recalc_rate,
+ .round_rate = at91sam9x5_clk_usb_round_rate,
+ .set_rate = at91sam9x5_clk_usb_set_rate,
+};
+
+static struct clk * __init
+at91sam9x5_clk_register_usb(struct at91_pmc *pmc, const char *name,
+ const char **parent_names, u8 num_parents)
+{
+ struct at91sam9x5_clk_usb *usb;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ usb = kzalloc(sizeof(*usb), GFP_KERNEL);
+ if (!usb)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &at91sam9x5_usb_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+
+ usb->hw.init = &init;
+ usb->pmc = pmc;
+
+ clk = clk_register(NULL, &usb->hw);
+ if (IS_ERR(clk))
+ kfree(usb);
+
+ return clk;
+}
+
+static struct clk * __init
+at91sam9n12_clk_register_usb(struct at91_pmc *pmc, const char *name,
+ const char *parent_name)
+{
+ struct at91sam9x5_clk_usb *usb;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ usb = kzalloc(sizeof(*usb), GFP_KERNEL);
+ if (!usb)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &at91sam9n12_usb_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = CLK_SET_RATE_GATE;
+
+ usb->hw.init = &init;
+ usb->pmc = pmc;
+
+ clk = clk_register(NULL, &usb->hw);
+ if (IS_ERR(clk))
+ kfree(usb);
+
+ return clk;
+}
+
+static unsigned long at91rm9200_clk_usb_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw);
+ struct at91_pmc *pmc = usb->pmc;
+ u32 tmp;
+ u8 usbdiv;
+
+ tmp = pmc_read(pmc, AT91_CKGR_PLLBR);
+ usbdiv = (tmp & AT91_PMC_USBDIV) >> RM9200_USB_DIV_SHIFT;
+ if (usb->divisors[usbdiv])
+ return parent_rate / usb->divisors[usbdiv];
+
+ return 0;
+}
+
+static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw);
+ unsigned long bestrate = 0;
+ int bestdiff = -1;
+ unsigned long tmprate;
+ int tmpdiff;
+ int i = 0;
+
+ for (i = 0; i < 4; i++) {
+ if (!usb->divisors[i])
+ continue;
+ tmprate = *parent_rate / usb->divisors[i];
+ if (tmprate < rate)
+ tmpdiff = rate - tmprate;
+ else
+ tmpdiff = tmprate - rate;
+
+ if (bestdiff < 0 || bestdiff > tmpdiff) {
+ bestrate = tmprate;
+ bestdiff = tmpdiff;
+ }
+
+ if (!bestdiff)
+ break;
+ }
+
+ return bestrate;
+}
+
+static int at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ u32 tmp;
+ int i;
+ struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw);
+ struct at91_pmc *pmc = usb->pmc;
+ unsigned long div = parent_rate / rate;
+
+ if (parent_rate % rate)
+ return -EINVAL;
+ for (i = 0; i < RM9200_USB_DIV_TAB_SIZE; i++) {
+ if (usb->divisors[i] == div) {
+ tmp = pmc_read(pmc, AT91_CKGR_PLLBR) &
+ ~AT91_PMC_USBDIV;
+ tmp |= i << RM9200_USB_DIV_SHIFT;
+ pmc_write(pmc, AT91_CKGR_PLLBR, tmp);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static const struct clk_ops at91rm9200_usb_ops = {
+ .recalc_rate = at91rm9200_clk_usb_recalc_rate,
+ .round_rate = at91rm9200_clk_usb_round_rate,
+ .set_rate = at91rm9200_clk_usb_set_rate,
+};
+
+static struct clk * __init
+at91rm9200_clk_register_usb(struct at91_pmc *pmc, const char *name,
+ const char *parent_name, const u32 *divisors)
+{
+ struct at91rm9200_clk_usb *usb;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ usb = kzalloc(sizeof(*usb), GFP_KERNEL);
+ if (!usb)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &at91rm9200_usb_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = 0;
+
+ usb->hw.init = &init;
+ usb->pmc = pmc;
+ memcpy(usb->divisors, divisors, sizeof(usb->divisors));
+
+ clk = clk_register(NULL, &usb->hw);
+ if (IS_ERR(clk))
+ kfree(usb);
+
+ return clk;
+}
+
+void __init of_at91sam9x5_clk_usb_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ struct clk *clk;
+ int i;
+ int num_parents;
+ const char *parent_names[USB_SOURCE_MAX];
+ const char *name = np->name;
+
+ num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+ if (num_parents <= 0 || num_parents > USB_SOURCE_MAX)
+ return;
+
+ for (i = 0; i < num_parents; i++) {
+ parent_names[i] = of_clk_get_parent_name(np, i);
+ if (!parent_names[i])
+ return;
+ }
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ clk = at91sam9x5_clk_register_usb(pmc, name, parent_names, num_parents);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+void __init of_at91sam9n12_clk_usb_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ struct clk *clk;
+ const char *parent_name;
+ const char *name = np->name;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ if (!parent_name)
+ return;
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ clk = at91sam9n12_clk_register_usb(pmc, name, parent_name);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+void __init of_at91rm9200_clk_usb_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ struct clk *clk;
+ const char *parent_name;
+ const char *name = np->name;
+ u32 divisors[4] = {0, 0, 0, 0};
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ if (!parent_name)
+ return;
+
+ of_property_read_u32_array(np, "atmel,clk-divisors", divisors, 4);
+ if (!divisors[0])
+ return;
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ clk = at91rm9200_clk_register_usb(pmc, name, parent_name, divisors);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
diff --git a/drivers/clk/at91/clk-utmi.c b/drivers/clk/at91/clk-utmi.c
new file mode 100644
index 00000000000..ae3263bc147
--- /dev/null
+++ b/drivers/clk/at91/clk-utmi.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#include "pmc.h"
+
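+/*
+ * The UTMI PLL multiplies its input by a fixed factor of 40 (e.g. a 12 MHz
+ * main crystal gives the 480 MHz USB high-speed clock).
+ */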
+#define UTMI_FIXED_MUL 40
+
+struct clk_utmi {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ unsigned int irq;
+ wait_queue_head_t wait;
+};
+
+#define to_clk_utmi(hw) container_of(hw, struct clk_utmi, hw)
+
+static irqreturn_t clk_utmi_irq_handler(int irq, void *dev_id)
+{
+ struct clk_utmi *utmi = (struct clk_utmi *)dev_id;
+
+ wake_up(&utmi->wait);
+ disable_irq_nosync(utmi->irq);
+
+ return IRQ_HANDLED;
+}
+
+static int clk_utmi_prepare(struct clk_hw *hw)
+{
+ struct clk_utmi *utmi = to_clk_utmi(hw);
+ struct at91_pmc *pmc = utmi->pmc;
+ u32 tmp = at91_pmc_read(AT91_CKGR_UCKR) | AT91_PMC_UPLLEN |
+ AT91_PMC_UPLLCOUNT | AT91_PMC_BIASEN;
+
+ pmc_write(pmc, AT91_CKGR_UCKR, tmp);
+
+ while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_LOCKU)) {
+ enable_irq(utmi->irq);
+ wait_event(utmi->wait,
+ pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_LOCKU);
+ }
+
+ return 0;
+}
+
+static int clk_utmi_is_prepared(struct clk_hw *hw)
+{
+ struct clk_utmi *utmi = to_clk_utmi(hw);
+ struct at91_pmc *pmc = utmi->pmc;
+
+ return !!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_LOCKU);
+}
+
+static void clk_utmi_unprepare(struct clk_hw *hw)
+{
+ struct clk_utmi *utmi = to_clk_utmi(hw);
+ struct at91_pmc *pmc = utmi->pmc;
+ u32 tmp = at91_pmc_read(AT91_CKGR_UCKR) & ~AT91_PMC_UPLLEN;
+
+ pmc_write(pmc, AT91_CKGR_UCKR, tmp);
+}
+
+static unsigned long clk_utmi_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+	/* The UTMI clock runs at a fixed multiple of its parent rate */
+ return parent_rate * UTMI_FIXED_MUL;
+}
+
+static const struct clk_ops utmi_ops = {
+ .prepare = clk_utmi_prepare,
+ .unprepare = clk_utmi_unprepare,
+ .is_prepared = clk_utmi_is_prepared,
+ .recalc_rate = clk_utmi_recalc_rate,
+};
+
+static struct clk * __init
+at91_clk_register_utmi(struct at91_pmc *pmc, unsigned int irq,
+ const char *name, const char *parent_name)
+{
+ int ret;
+ struct clk_utmi *utmi;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ utmi = kzalloc(sizeof(*utmi), GFP_KERNEL);
+ if (!utmi)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &utmi_ops;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+ init.flags = CLK_SET_RATE_GATE;
+
+ utmi->hw.init = &init;
+ utmi->pmc = pmc;
+ utmi->irq = irq;
+ init_waitqueue_head(&utmi->wait);
+ irq_set_status_flags(utmi->irq, IRQ_NOAUTOEN);
+ ret = request_irq(utmi->irq, clk_utmi_irq_handler,
+ IRQF_TRIGGER_HIGH, "clk-utmi", utmi);
+	if (ret) {
+		kfree(utmi);
+		return ERR_PTR(ret);
+	}
+
+ clk = clk_register(NULL, &utmi->hw);
+ if (IS_ERR(clk))
+ kfree(utmi);
+
+ return clk;
+}
+
+static void __init
+of_at91_clk_utmi_setup(struct device_node *np, struct at91_pmc *pmc)
+{
+ unsigned int irq;
+ struct clk *clk;
+ const char *parent_name;
+ const char *name = np->name;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq)
+ return;
+
+ clk = at91_clk_register_utmi(pmc, irq, name, parent_name);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_utmi_setup(np, pmc);
+}
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
new file mode 100644
index 00000000000..524196bb35a
--- /dev/null
+++ b/drivers/clk/at91/pmc.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
+
+#include <asm/proc-fns.h>
+
+#include "pmc.h"
+
+void __iomem *at91_pmc_base;
+EXPORT_SYMBOL_GPL(at91_pmc_base);
+
+void at91sam9_idle(void)
+{
+ at91_pmc_write(AT91_PMC_SCDR, AT91_PMC_PCK);
+ cpu_do_idle();
+}
+
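+/*
+ * Read a clock range property formatted as a <min max> pair of u32 cells.
+ */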
+int of_at91_get_clk_range(struct device_node *np, const char *propname,
+ struct clk_range *range)
+{
+ u32 min, max;
+ int ret;
+
+ ret = of_property_read_u32_index(np, propname, 0, &min);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32_index(np, propname, 1, &max);
+ if (ret)
+ return ret;
+
+ if (range) {
+ range->min = min;
+ range->max = max;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_at91_get_clk_range);
+
+static void pmc_irq_mask(struct irq_data *d)
+{
+ struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
+
+ pmc_write(pmc, AT91_PMC_IDR, 1 << d->hwirq);
+}
+
+static void pmc_irq_unmask(struct irq_data *d)
+{
+ struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
+
+ pmc_write(pmc, AT91_PMC_IER, 1 << d->hwirq);
+}
+
+static int pmc_irq_set_type(struct irq_data *d, unsigned type)
+{
+ if (type != IRQ_TYPE_LEVEL_HIGH) {
+		pr_warn("PMC: type not supported (only IRQ_TYPE_LEVEL_HIGH is supported)\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct irq_chip pmc_irq = {
+ .name = "PMC",
+ .irq_disable = pmc_irq_mask,
+ .irq_mask = pmc_irq_mask,
+ .irq_unmask = pmc_irq_unmask,
+ .irq_set_type = pmc_irq_set_type,
+};
+
+static struct lock_class_key pmc_lock_class;
+
+static int pmc_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct at91_pmc *pmc = h->host_data;
+
+ irq_set_lockdep_class(virq, &pmc_lock_class);
+
+ irq_set_chip_and_handler(virq, &pmc_irq,
+ handle_level_irq);
+ set_irq_flags(virq, IRQF_VALID);
+ irq_set_chip_data(virq, pmc);
+
+ return 0;
+}
+
+static int pmc_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq,
+ unsigned int *out_type)
+{
+ struct at91_pmc *pmc = d->host_data;
+ const struct at91_pmc_caps *caps = pmc->caps;
+
+ if (WARN_ON(intsize < 1))
+ return -EINVAL;
+
+ *out_hwirq = intspec[0];
+
+ if (!(caps->available_irqs & (1 << *out_hwirq)))
+ return -EINVAL;
+
+ *out_type = IRQ_TYPE_LEVEL_HIGH;
+
+ return 0;
+}
+
+static struct irq_domain_ops pmc_irq_ops = {
+ .map = pmc_irq_map,
+ .xlate = pmc_irq_domain_xlate,
+};
+
+static irqreturn_t pmc_irq_handler(int irq, void *data)
+{
+ struct at91_pmc *pmc = (struct at91_pmc *)data;
+ unsigned long sr;
+ int n;
+
+ sr = pmc_read(pmc, AT91_PMC_SR) & pmc_read(pmc, AT91_PMC_IMR);
+ if (!sr)
+ return IRQ_NONE;
+
+ for_each_set_bit(n, &sr, BITS_PER_LONG)
+ generic_handle_irq(irq_find_mapping(pmc->irqdomain, n));
+
+ return IRQ_HANDLED;
+}
+
+static const struct at91_pmc_caps at91rm9200_caps = {
+ .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB |
+ AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY |
+ AT91_PMC_PCK1RDY | AT91_PMC_PCK2RDY |
+ AT91_PMC_PCK3RDY,
+};
+
+static const struct at91_pmc_caps at91sam9260_caps = {
+ .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB |
+ AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY |
+ AT91_PMC_PCK1RDY,
+};
+
+static const struct at91_pmc_caps at91sam9g45_caps = {
+ .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY |
+ AT91_PMC_LOCKU | AT91_PMC_PCK0RDY |
+ AT91_PMC_PCK1RDY,
+};
+
+static const struct at91_pmc_caps at91sam9n12_caps = {
+ .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB |
+ AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY |
+ AT91_PMC_PCK1RDY | AT91_PMC_MOSCSELS |
+ AT91_PMC_MOSCRCS | AT91_PMC_CFDEV,
+};
+
+static const struct at91_pmc_caps at91sam9x5_caps = {
+ .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY |
+ AT91_PMC_LOCKU | AT91_PMC_PCK0RDY |
+ AT91_PMC_PCK1RDY | AT91_PMC_MOSCSELS |
+ AT91_PMC_MOSCRCS | AT91_PMC_CFDEV,
+};
+
+static const struct at91_pmc_caps sama5d3_caps = {
+ .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY |
+ AT91_PMC_LOCKU | AT91_PMC_PCK0RDY |
+ AT91_PMC_PCK1RDY | AT91_PMC_PCK2RDY |
+ AT91_PMC_MOSCSELS | AT91_PMC_MOSCRCS |
+ AT91_PMC_CFDEV,
+};
+
+static struct at91_pmc *__init at91_pmc_init(struct device_node *np,
+ void __iomem *regbase, int virq,
+ const struct at91_pmc_caps *caps)
+{
+ struct at91_pmc *pmc;
+
+ if (!regbase || !virq || !caps)
+ return NULL;
+
+ at91_pmc_base = regbase;
+
+ pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
+ if (!pmc)
+ return NULL;
+
+ spin_lock_init(&pmc->lock);
+ pmc->regbase = regbase;
+ pmc->virq = virq;
+ pmc->caps = caps;
+
+ pmc->irqdomain = irq_domain_add_linear(np, 32, &pmc_irq_ops, pmc);
+
+ if (!pmc->irqdomain)
+ goto out_free_pmc;
+
+ pmc_write(pmc, AT91_PMC_IDR, 0xffffffff);
+ if (request_irq(pmc->virq, pmc_irq_handler, IRQF_SHARED, "pmc", pmc))
+ goto out_remove_irqdomain;
+
+ return pmc;
+
+out_remove_irqdomain:
+ irq_domain_remove(pmc->irqdomain);
+out_free_pmc:
+ kfree(pmc);
+
+ return NULL;
+}
+
+static const struct of_device_id pmc_clk_ids[] __initconst = {
+ /* Slow oscillator */
+ {
+ .compatible = "atmel,at91sam9260-clk-slow",
+ .data = of_at91sam9260_clk_slow_setup,
+ },
+ /* Main clock */
+ {
+ .compatible = "atmel,at91rm9200-clk-main-osc",
+ .data = of_at91rm9200_clk_main_osc_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-clk-main-rc-osc",
+ .data = of_at91sam9x5_clk_main_rc_osc_setup,
+ },
+ {
+ .compatible = "atmel,at91rm9200-clk-main",
+ .data = of_at91rm9200_clk_main_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-clk-main",
+ .data = of_at91sam9x5_clk_main_setup,
+ },
+ /* PLL clocks */
+ {
+ .compatible = "atmel,at91rm9200-clk-pll",
+ .data = of_at91rm9200_clk_pll_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9g45-clk-pll",
+ .data = of_at91sam9g45_clk_pll_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9g20-clk-pllb",
+ .data = of_at91sam9g20_clk_pllb_setup,
+ },
+ {
+ .compatible = "atmel,sama5d3-clk-pll",
+ .data = of_sama5d3_clk_pll_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-clk-plldiv",
+ .data = of_at91sam9x5_clk_plldiv_setup,
+ },
+ /* Master clock */
+ {
+ .compatible = "atmel,at91rm9200-clk-master",
+ .data = of_at91rm9200_clk_master_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-clk-master",
+ .data = of_at91sam9x5_clk_master_setup,
+ },
+ /* System clocks */
+ {
+ .compatible = "atmel,at91rm9200-clk-system",
+ .data = of_at91rm9200_clk_sys_setup,
+ },
+ /* Peripheral clocks */
+ {
+ .compatible = "atmel,at91rm9200-clk-peripheral",
+ .data = of_at91rm9200_clk_periph_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-clk-peripheral",
+ .data = of_at91sam9x5_clk_periph_setup,
+ },
+ /* Programmable clocks */
+ {
+ .compatible = "atmel,at91rm9200-clk-programmable",
+ .data = of_at91rm9200_clk_prog_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9g45-clk-programmable",
+ .data = of_at91sam9g45_clk_prog_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-clk-programmable",
+ .data = of_at91sam9x5_clk_prog_setup,
+ },
+ /* UTMI clock */
+#if defined(CONFIG_HAVE_AT91_UTMI)
+ {
+ .compatible = "atmel,at91sam9x5-clk-utmi",
+ .data = of_at91sam9x5_clk_utmi_setup,
+ },
+#endif
+ /* USB clock */
+#if defined(CONFIG_HAVE_AT91_USB_CLK)
+ {
+ .compatible = "atmel,at91rm9200-clk-usb",
+ .data = of_at91rm9200_clk_usb_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-clk-usb",
+ .data = of_at91sam9x5_clk_usb_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9n12-clk-usb",
+ .data = of_at91sam9n12_clk_usb_setup,
+ },
+#endif
+ /* SMD clock */
+#if defined(CONFIG_HAVE_AT91_SMD)
+ {
+ .compatible = "atmel,at91sam9x5-clk-smd",
+ .data = of_at91sam9x5_clk_smd_setup,
+ },
+#endif
+ { /*sentinel*/ }
+};
+
+static void __init of_at91_pmc_setup(struct device_node *np,
+ const struct at91_pmc_caps *caps)
+{
+ struct at91_pmc *pmc;
+ struct device_node *childnp;
+ void (*clk_setup)(struct device_node *, struct at91_pmc *);
+ const struct of_device_id *clk_id;
+ void __iomem *regbase = of_iomap(np, 0);
+ int virq;
+
+ if (!regbase)
+ return;
+
+ virq = irq_of_parse_and_map(np, 0);
+ if (!virq)
+ return;
+
+ pmc = at91_pmc_init(np, regbase, virq, caps);
+ if (!pmc)
+ return;
+ for_each_child_of_node(np, childnp) {
+ clk_id = of_match_node(pmc_clk_ids, childnp);
+ if (!clk_id)
+ continue;
+ clk_setup = clk_id->data;
+ clk_setup(childnp, pmc);
+ }
+}
+
+static void __init of_at91rm9200_pmc_setup(struct device_node *np)
+{
+ of_at91_pmc_setup(np, &at91rm9200_caps);
+}
+CLK_OF_DECLARE(at91rm9200_clk_pmc, "atmel,at91rm9200-pmc",
+ of_at91rm9200_pmc_setup);
+
+static void __init of_at91sam9260_pmc_setup(struct device_node *np)
+{
+ of_at91_pmc_setup(np, &at91sam9260_caps);
+}
+CLK_OF_DECLARE(at91sam9260_clk_pmc, "atmel,at91sam9260-pmc",
+ of_at91sam9260_pmc_setup);
+
+static void __init of_at91sam9g45_pmc_setup(struct device_node *np)
+{
+ of_at91_pmc_setup(np, &at91sam9g45_caps);
+}
+CLK_OF_DECLARE(at91sam9g45_clk_pmc, "atmel,at91sam9g45-pmc",
+ of_at91sam9g45_pmc_setup);
+
+static void __init of_at91sam9n12_pmc_setup(struct device_node *np)
+{
+ of_at91_pmc_setup(np, &at91sam9n12_caps);
+}
+CLK_OF_DECLARE(at91sam9n12_clk_pmc, "atmel,at91sam9n12-pmc",
+ of_at91sam9n12_pmc_setup);
+
+static void __init of_at91sam9x5_pmc_setup(struct device_node *np)
+{
+ of_at91_pmc_setup(np, &at91sam9x5_caps);
+}
+CLK_OF_DECLARE(at91sam9x5_clk_pmc, "atmel,at91sam9x5-pmc",
+ of_at91sam9x5_pmc_setup);
+
+static void __init of_sama5d3_pmc_setup(struct device_node *np)
+{
+ of_at91_pmc_setup(np, &sama5d3_caps);
+}
+CLK_OF_DECLARE(sama5d3_clk_pmc, "atmel,sama5d3-pmc",
+ of_sama5d3_pmc_setup);
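
of_at91_get_clk_range() above reads a two-cell <min max> property into a struct clk_range so individual clock drivers can bound their output rates. A short usage sketch; the property name "atmel,clk-output-range" is assumed here for illustration and is not defined by this file:

	struct clk_range range = CLK_RANGE(0, 0);
	int ret;

	/* Optional property: on error the range simply stays unconstrained */
	ret = of_at91_get_clk_range(np, "atmel,clk-output-range", &range);
	if (!ret)
		pr_debug("%s: output range %lu..%lu Hz\n",
			 np->name, range.min, range.max);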
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
new file mode 100644
index 00000000000..6c762597611
--- /dev/null
+++ b/drivers/clk/at91/pmc.h
@@ -0,0 +1,123 @@
+/*
+ * drivers/clk/at91/pmc.h
+ *
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __PMC_H_
+#define __PMC_H_
+
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/spinlock.h>
+
+struct clk_range {
+ unsigned long min;
+ unsigned long max;
+};
+
+#define CLK_RANGE(MIN, MAX) {.min = MIN, .max = MAX,}
+
+struct at91_pmc_caps {
+ u32 available_irqs;
+};
+
+struct at91_pmc {
+ void __iomem *regbase;
+ int virq;
+ spinlock_t lock;
+ const struct at91_pmc_caps *caps;
+ struct irq_domain *irqdomain;
+};
+
+static inline void pmc_lock(struct at91_pmc *pmc)
+{
+ spin_lock(&pmc->lock);
+}
+
+static inline void pmc_unlock(struct at91_pmc *pmc)
+{
+ spin_unlock(&pmc->lock);
+}
+
+static inline u32 pmc_read(struct at91_pmc *pmc, int offset)
+{
+ return readl(pmc->regbase + offset);
+}
+
+static inline void pmc_write(struct at91_pmc *pmc, int offset, u32 value)
+{
+ writel(value, pmc->regbase + offset);
+}
+
+int of_at91_get_clk_range(struct device_node *np, const char *propname,
+ struct clk_range *range);
+
+extern void __init of_at91sam9260_clk_slow_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+extern void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91rm9200_clk_main_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9x5_clk_main_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+extern void __init of_at91rm9200_clk_pll_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9g45_clk_pll_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9g20_clk_pllb_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_sama5d3_clk_pll_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9x5_clk_plldiv_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+extern void __init of_at91rm9200_clk_master_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9x5_clk_master_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+extern void __init of_at91rm9200_clk_sys_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+extern void __init of_at91rm9200_clk_periph_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9x5_clk_periph_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+extern void __init of_at91rm9200_clk_prog_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9g45_clk_prog_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9x5_clk_prog_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+#if defined(CONFIG_HAVE_AT91_UTMI)
+extern void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+#endif
+
+#if defined(CONFIG_HAVE_AT91_USB_CLK)
+extern void __init of_at91rm9200_clk_usb_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9x5_clk_usb_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9n12_clk_usb_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+#endif
+
+#if defined(CONFIG_HAVE_AT91_SMD)
+extern void __init of_at91sam9x5_clk_smd_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+#endif
+
+#endif /* __PMC_H_ */
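
The pmc_lock()/pmc_read()/pmc_write() helpers above are what the per-clock implementations use to touch PMC registers under the shared spinlock. A minimal sketch of the usual read-modify-write pattern; the system-clock registers named here are stand-ins chosen for illustration:

static void example_enable(struct at91_pmc *pmc, u32 mask)
{
	pmc_lock(pmc);
	/* Only write the enable register if the clock is not already on */
	if (!(pmc_read(pmc, AT91_PMC_SCSR) & mask))
		pmc_write(pmc, AT91_PMC_SCER, mask);
	pmc_unlock(pmc);
}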
diff --git a/drivers/clk/at91/sckc.c b/drivers/clk/at91/sckc.c
new file mode 100644
index 00000000000..1184d76a7ab
--- /dev/null
+++ b/drivers/clk/at91/sckc.c
@@ -0,0 +1,57 @@
+/*
+ * drivers/clk/at91/sckc.c
+ *
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+
+#include "sckc.h"
+
+static const struct of_device_id sckc_clk_ids[] __initconst = {
+ /* Slow clock */
+ {
+ .compatible = "atmel,at91sam9x5-clk-slow-osc",
+ .data = of_at91sam9x5_clk_slow_osc_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-clk-slow-rc-osc",
+ .data = of_at91sam9x5_clk_slow_rc_osc_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-clk-slow",
+ .data = of_at91sam9x5_clk_slow_setup,
+ },
+ { /*sentinel*/ }
+};
+
+static void __init of_at91sam9x5_sckc_setup(struct device_node *np)
+{
+ struct device_node *childnp;
+ void (*clk_setup)(struct device_node *, void __iomem *);
+ const struct of_device_id *clk_id;
+ void __iomem *regbase = of_iomap(np, 0);
+
+ if (!regbase)
+ return;
+
+ for_each_child_of_node(np, childnp) {
+ clk_id = of_match_node(sckc_clk_ids, childnp);
+ if (!clk_id)
+ continue;
+ clk_setup = clk_id->data;
+ clk_setup(childnp, regbase);
+ }
+}
+CLK_OF_DECLARE(at91sam9x5_clk_sckc, "atmel,at91sam9x5-sckc",
+ of_at91sam9x5_sckc_setup);
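
The dispatcher above hands each matching child node the ioremapped SCKC register base, so the callbacks declared in sckc.h all share the (node, sckcr) calling convention. A sketch of the shape of such a callback; the body is a placeholder, only the signature comes from this patch:

void __init of_example_clk_slow_setup(struct device_node *np,
				      void __iomem *sckcr)
{
	const char *name = np->name;

	of_property_read_string(np, "clock-output-names", &name);
	/*
	 * ... program the oscillator bits with readl(sckcr) and
	 * writel(val, sckcr), register a clk under "name", then
	 * of_clk_add_provider(np, of_clk_src_simple_get, clk) ...
	 */
}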
diff --git a/drivers/clk/at91/sckc.h b/drivers/clk/at91/sckc.h
new file mode 100644
index 00000000000..836fcf59820
--- /dev/null
+++ b/drivers/clk/at91/sckc.h
@@ -0,0 +1,22 @@
+/*
+ * drivers/clk/at91/sckc.h
+ *
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __AT91_SCKC_H_
+#define __AT91_SCKC_H_
+
+extern void __init of_at91sam9x5_clk_slow_osc_setup(struct device_node *np,
+ void __iomem *sckcr);
+extern void __init of_at91sam9x5_clk_slow_rc_osc_setup(struct device_node *np,
+ void __iomem *sckcr);
+extern void __init of_at91sam9x5_clk_slow_setup(struct device_node *np,
+ void __iomem *sckcr);
+
+#endif /* __AT91_SCKC_H_ */
diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig
new file mode 100644
index 00000000000..75506e53075
--- /dev/null
+++ b/drivers/clk/bcm/Kconfig
@@ -0,0 +1,9 @@
+config CLK_BCM_KONA
+ bool "Broadcom Kona CCU clock support"
+ depends on ARCH_BCM_MOBILE
+ depends on COMMON_CLK
+ default y
+ help
+ Enable common clock framework support for Broadcom SoCs
+ using "Kona" style clock control units, including those
+ in the BCM281xx and BCM21664 families.
diff --git a/drivers/clk/bcm/Makefile b/drivers/clk/bcm/Makefile
new file mode 100644
index 00000000000..6297d05a9a1
--- /dev/null
+++ b/drivers/clk/bcm/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_CLK_BCM_KONA) += clk-kona.o
+obj-$(CONFIG_CLK_BCM_KONA) += clk-kona-setup.o
+obj-$(CONFIG_CLK_BCM_KONA) += clk-bcm281xx.o
+obj-$(CONFIG_CLK_BCM_KONA) += clk-bcm21664.o
diff --git a/drivers/clk/bcm/clk-bcm21664.c b/drivers/clk/bcm/clk-bcm21664.c
new file mode 100644
index 00000000000..eeae4cad228
--- /dev/null
+++ b/drivers/clk/bcm/clk-bcm21664.c
@@ -0,0 +1,290 @@
+/*
+ * Copyright (C) 2014 Broadcom Corporation
+ * Copyright 2014 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "clk-kona.h"
+#include "dt-bindings/clock/bcm21664.h"
+
+#define BCM21664_CCU_COMMON(_name, _capname) \
+ KONA_CCU_COMMON(BCM21664, _name, _capname)
+
+/* Root CCU */
+
+static struct peri_clk_data frac_1m_data = {
+ .gate = HW_SW_GATE(0x214, 16, 0, 1),
+ .clocks = CLOCKS("ref_crystal"),
+};
+
+static struct ccu_data root_ccu_data = {
+ BCM21664_CCU_COMMON(root, ROOT),
+ /* no policy control */
+ .kona_clks = {
+ [BCM21664_ROOT_CCU_FRAC_1M] =
+ KONA_CLK(root, frac_1m, peri),
+ [BCM21664_ROOT_CCU_CLOCK_COUNT] = LAST_KONA_CLK,
+ },
+};
+
+/* AON CCU */
+
+static struct peri_clk_data hub_timer_data = {
+ .gate = HW_SW_GATE(0x0414, 16, 0, 1),
+ .hyst = HYST(0x0414, 8, 9),
+ .clocks = CLOCKS("bbl_32k",
+ "frac_1m",
+ "dft_19_5m"),
+ .sel = SELECTOR(0x0a10, 0, 2),
+ .trig = TRIGGER(0x0a40, 4),
+};
+
+static struct ccu_data aon_ccu_data = {
+ BCM21664_CCU_COMMON(aon, AON),
+ .policy = {
+ .enable = CCU_LVM_EN(0x0034, 0),
+ .control = CCU_POLICY_CTL(0x000c, 0, 1, 2),
+ },
+ .kona_clks = {
+ [BCM21664_AON_CCU_HUB_TIMER] =
+ KONA_CLK(aon, hub_timer, peri),
+ [BCM21664_AON_CCU_CLOCK_COUNT] = LAST_KONA_CLK,
+ },
+};
+
+/* Master CCU */
+
+static struct peri_clk_data sdio1_data = {
+ .gate = HW_SW_GATE(0x0358, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_52m",
+ "ref_52m",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a28, 0, 3),
+ .div = DIVIDER(0x0a28, 4, 14),
+ .trig = TRIGGER(0x0afc, 9),
+};
+
+static struct peri_clk_data sdio2_data = {
+ .gate = HW_SW_GATE(0x035c, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_52m",
+ "ref_52m",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a2c, 0, 3),
+ .div = DIVIDER(0x0a2c, 4, 14),
+ .trig = TRIGGER(0x0afc, 10),
+};
+
+static struct peri_clk_data sdio3_data = {
+ .gate = HW_SW_GATE(0x0364, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_52m",
+ "ref_52m",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a34, 0, 3),
+ .div = DIVIDER(0x0a34, 4, 14),
+ .trig = TRIGGER(0x0afc, 12),
+};
+
+static struct peri_clk_data sdio4_data = {
+ .gate = HW_SW_GATE(0x0360, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_52m",
+ "ref_52m",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a30, 0, 3),
+ .div = DIVIDER(0x0a30, 4, 14),
+ .trig = TRIGGER(0x0afc, 11),
+};
+
+static struct peri_clk_data sdio1_sleep_data = {
+ .clocks = CLOCKS("ref_32k"), /* Verify */
+ .gate = HW_SW_GATE(0x0358, 18, 2, 3),
+};
+
+static struct peri_clk_data sdio2_sleep_data = {
+ .clocks = CLOCKS("ref_32k"), /* Verify */
+ .gate = HW_SW_GATE(0x035c, 18, 2, 3),
+};
+
+static struct peri_clk_data sdio3_sleep_data = {
+ .clocks = CLOCKS("ref_32k"), /* Verify */
+ .gate = HW_SW_GATE(0x0364, 18, 2, 3),
+};
+
+static struct peri_clk_data sdio4_sleep_data = {
+ .clocks = CLOCKS("ref_32k"), /* Verify */
+ .gate = HW_SW_GATE(0x0360, 18, 2, 3),
+};
+
+static struct ccu_data master_ccu_data = {
+ BCM21664_CCU_COMMON(master, MASTER),
+ .policy = {
+ .enable = CCU_LVM_EN(0x0034, 0),
+ .control = CCU_POLICY_CTL(0x000c, 0, 1, 2),
+ },
+ .kona_clks = {
+ [BCM21664_MASTER_CCU_SDIO1] =
+ KONA_CLK(master, sdio1, peri),
+ [BCM21664_MASTER_CCU_SDIO2] =
+ KONA_CLK(master, sdio2, peri),
+ [BCM21664_MASTER_CCU_SDIO3] =
+ KONA_CLK(master, sdio3, peri),
+ [BCM21664_MASTER_CCU_SDIO4] =
+ KONA_CLK(master, sdio4, peri),
+ [BCM21664_MASTER_CCU_SDIO1_SLEEP] =
+ KONA_CLK(master, sdio1_sleep, peri),
+ [BCM21664_MASTER_CCU_SDIO2_SLEEP] =
+ KONA_CLK(master, sdio2_sleep, peri),
+ [BCM21664_MASTER_CCU_SDIO3_SLEEP] =
+ KONA_CLK(master, sdio3_sleep, peri),
+ [BCM21664_MASTER_CCU_SDIO4_SLEEP] =
+ KONA_CLK(master, sdio4_sleep, peri),
+ [BCM21664_MASTER_CCU_CLOCK_COUNT] = LAST_KONA_CLK,
+ },
+};
+
+/* Slave CCU */
+
+static struct peri_clk_data uartb_data = {
+ .gate = HW_SW_GATE(0x0400, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_156m",
+ "ref_156m"),
+ .sel = SELECTOR(0x0a10, 0, 2),
+ .div = FRAC_DIVIDER(0x0a10, 4, 12, 8),
+ .trig = TRIGGER(0x0afc, 2),
+};
+
+static struct peri_clk_data uartb2_data = {
+ .gate = HW_SW_GATE(0x0404, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_156m",
+ "ref_156m"),
+ .sel = SELECTOR(0x0a14, 0, 2),
+ .div = FRAC_DIVIDER(0x0a14, 4, 12, 8),
+ .trig = TRIGGER(0x0afc, 3),
+};
+
+static struct peri_clk_data uartb3_data = {
+ .gate = HW_SW_GATE(0x0408, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_156m",
+ "ref_156m"),
+ .sel = SELECTOR(0x0a18, 0, 2),
+ .div = FRAC_DIVIDER(0x0a18, 4, 12, 8),
+ .trig = TRIGGER(0x0afc, 4),
+};
+
+static struct peri_clk_data bsc1_data = {
+ .gate = HW_SW_GATE(0x0458, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_104m",
+ "ref_104m",
+ "var_13m",
+ "ref_13m"),
+ .sel = SELECTOR(0x0a64, 0, 3),
+ .trig = TRIGGER(0x0afc, 23),
+};
+
+static struct peri_clk_data bsc2_data = {
+ .gate = HW_SW_GATE(0x045c, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_104m",
+ "ref_104m",
+ "var_13m",
+ "ref_13m"),
+ .sel = SELECTOR(0x0a68, 0, 3),
+ .trig = TRIGGER(0x0afc, 24),
+};
+
+static struct peri_clk_data bsc3_data = {
+ .gate = HW_SW_GATE(0x0470, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_104m",
+ "ref_104m",
+ "var_13m",
+ "ref_13m"),
+ .sel = SELECTOR(0x0a7c, 0, 3),
+ .trig = TRIGGER(0x0afc, 18),
+};
+
+static struct peri_clk_data bsc4_data = {
+ .gate = HW_SW_GATE(0x0474, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_104m",
+ "ref_104m",
+ "var_13m",
+ "ref_13m"),
+ .sel = SELECTOR(0x0a80, 0, 3),
+ .trig = TRIGGER(0x0afc, 19),
+};
+
+static struct ccu_data slave_ccu_data = {
+ BCM21664_CCU_COMMON(slave, SLAVE),
+ .policy = {
+ .enable = CCU_LVM_EN(0x0034, 0),
+ .control = CCU_POLICY_CTL(0x000c, 0, 1, 2),
+ },
+ .kona_clks = {
+ [BCM21664_SLAVE_CCU_UARTB] =
+ KONA_CLK(slave, uartb, peri),
+ [BCM21664_SLAVE_CCU_UARTB2] =
+ KONA_CLK(slave, uartb2, peri),
+ [BCM21664_SLAVE_CCU_UARTB3] =
+ KONA_CLK(slave, uartb3, peri),
+ [BCM21664_SLAVE_CCU_BSC1] =
+ KONA_CLK(slave, bsc1, peri),
+ [BCM21664_SLAVE_CCU_BSC2] =
+ KONA_CLK(slave, bsc2, peri),
+ [BCM21664_SLAVE_CCU_BSC3] =
+ KONA_CLK(slave, bsc3, peri),
+ [BCM21664_SLAVE_CCU_BSC4] =
+ KONA_CLK(slave, bsc4, peri),
+ [BCM21664_SLAVE_CCU_CLOCK_COUNT] = LAST_KONA_CLK,
+ },
+};
+
+/* Device tree match table callback functions */
+
+static void __init kona_dt_root_ccu_setup(struct device_node *node)
+{
+ kona_dt_ccu_setup(&root_ccu_data, node);
+}
+
+static void __init kona_dt_aon_ccu_setup(struct device_node *node)
+{
+ kona_dt_ccu_setup(&aon_ccu_data, node);
+}
+
+static void __init kona_dt_master_ccu_setup(struct device_node *node)
+{
+ kona_dt_ccu_setup(&master_ccu_data, node);
+}
+
+static void __init kona_dt_slave_ccu_setup(struct device_node *node)
+{
+ kona_dt_ccu_setup(&slave_ccu_data, node);
+}
+
+CLK_OF_DECLARE(bcm21664_root_ccu, BCM21664_DT_ROOT_CCU_COMPAT,
+ kona_dt_root_ccu_setup);
+CLK_OF_DECLARE(bcm21664_aon_ccu, BCM21664_DT_AON_CCU_COMPAT,
+ kona_dt_aon_ccu_setup);
+CLK_OF_DECLARE(bcm21664_master_ccu, BCM21664_DT_MASTER_CCU_COMPAT,
+ kona_dt_master_ccu_setup);
+CLK_OF_DECLARE(bcm21664_slave_ccu, BCM21664_DT_SLAVE_CCU_COMPAT,
+ kona_dt_slave_ccu_setup);
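
Each peri_clk_data above is a purely declarative description of one clock: the gate, selector, divider and trigger macros only record register offsets and bit positions, and the generic code in clk-kona.c interprets them. A hypothetical entry showing the shape of such a description; every offset, bit position and parent name below is an invented placeholder, not taken from BCM21664 documentation:

static struct peri_clk_data example_data = {
	.gate	= HW_SW_GATE(0x0480, 18, 2, 3),
	.clocks	= CLOCKS("ref_crystal",
			 "var_104m"),
	.sel	= SELECTOR(0x0a90, 0, 2),
	.div	= DIVIDER(0x0a90, 4, 14),
	.trig	= TRIGGER(0x0afc, 20),
};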
diff --git a/drivers/clk/bcm/clk-bcm281xx.c b/drivers/clk/bcm/clk-bcm281xx.c
new file mode 100644
index 00000000000..502a487d62c
--- /dev/null
+++ b/drivers/clk/bcm/clk-bcm281xx.c
@@ -0,0 +1,375 @@
+/*
+ * Copyright (C) 2013 Broadcom Corporation
+ * Copyright 2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "clk-kona.h"
+#include "dt-bindings/clock/bcm281xx.h"
+
+#define BCM281XX_CCU_COMMON(_name, _ucase_name) \
+ KONA_CCU_COMMON(BCM281XX, _name, _ucase_name)
+
+/* Root CCU */
+
+static struct peri_clk_data frac_1m_data = {
+ .gate = HW_SW_GATE(0x214, 16, 0, 1),
+ .trig = TRIGGER(0x0e04, 0),
+ .div = FRAC_DIVIDER(0x0e00, 0, 22, 16),
+ .clocks = CLOCKS("ref_crystal"),
+};
+
+static struct ccu_data root_ccu_data = {
+ BCM281XX_CCU_COMMON(root, ROOT),
+ .kona_clks = {
+ [BCM281XX_ROOT_CCU_FRAC_1M] =
+ KONA_CLK(root, frac_1m, peri),
+ [BCM281XX_ROOT_CCU_CLOCK_COUNT] = LAST_KONA_CLK,
+ },
+};
+
+/* AON CCU */
+
+static struct peri_clk_data hub_timer_data = {
+ .gate = HW_SW_GATE(0x0414, 16, 0, 1),
+ .clocks = CLOCKS("bbl_32k",
+ "frac_1m",
+ "dft_19_5m"),
+ .sel = SELECTOR(0x0a10, 0, 2),
+ .trig = TRIGGER(0x0a40, 4),
+};
+
+static struct peri_clk_data pmu_bsc_data = {
+ .gate = HW_SW_GATE(0x0418, 16, 0, 1),
+ .clocks = CLOCKS("ref_crystal",
+ "pmu_bsc_var",
+ "bbl_32k"),
+ .sel = SELECTOR(0x0a04, 0, 2),
+ .div = DIVIDER(0x0a04, 3, 4),
+ .trig = TRIGGER(0x0a40, 0),
+};
+
+static struct peri_clk_data pmu_bsc_var_data = {
+ .clocks = CLOCKS("var_312m",
+ "ref_312m"),
+ .sel = SELECTOR(0x0a00, 0, 2),
+ .div = DIVIDER(0x0a00, 4, 5),
+ .trig = TRIGGER(0x0a40, 2),
+};
+
+static struct ccu_data aon_ccu_data = {
+ BCM281XX_CCU_COMMON(aon, AON),
+ .kona_clks = {
+ [BCM281XX_AON_CCU_HUB_TIMER] =
+ KONA_CLK(aon, hub_timer, peri),
+ [BCM281XX_AON_CCU_PMU_BSC] =
+ KONA_CLK(aon, pmu_bsc, peri),
+ [BCM281XX_AON_CCU_PMU_BSC_VAR] =
+ KONA_CLK(aon, pmu_bsc_var, peri),
+ [BCM281XX_AON_CCU_CLOCK_COUNT] = LAST_KONA_CLK,
+ },
+};
+
+/* Hub CCU */
+
+static struct peri_clk_data tmon_1m_data = {
+ .gate = HW_SW_GATE(0x04a4, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "frac_1m"),
+ .sel = SELECTOR(0x0e74, 0, 2),
+ .trig = TRIGGER(0x0e84, 1),
+};
+
+static struct ccu_data hub_ccu_data = {
+ BCM281XX_CCU_COMMON(hub, HUB),
+ .kona_clks = {
+ [BCM281XX_HUB_CCU_TMON_1M] =
+ KONA_CLK(hub, tmon_1m, peri),
+ [BCM281XX_HUB_CCU_CLOCK_COUNT] = LAST_KONA_CLK,
+ },
+};
+
+/* Master CCU */
+
+static struct peri_clk_data sdio1_data = {
+ .gate = HW_SW_GATE(0x0358, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_52m",
+ "ref_52m",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a28, 0, 3),
+ .div = DIVIDER(0x0a28, 4, 14),
+ .trig = TRIGGER(0x0afc, 9),
+};
+
+static struct peri_clk_data sdio2_data = {
+ .gate = HW_SW_GATE(0x035c, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_52m",
+ "ref_52m",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a2c, 0, 3),
+ .div = DIVIDER(0x0a2c, 4, 14),
+ .trig = TRIGGER(0x0afc, 10),
+};
+
+static struct peri_clk_data sdio3_data = {
+ .gate = HW_SW_GATE(0x0364, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_52m",
+ "ref_52m",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a34, 0, 3),
+ .div = DIVIDER(0x0a34, 4, 14),
+ .trig = TRIGGER(0x0afc, 12),
+};
+
+static struct peri_clk_data sdio4_data = {
+ .gate = HW_SW_GATE(0x0360, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_52m",
+ "ref_52m",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a30, 0, 3),
+ .div = DIVIDER(0x0a30, 4, 14),
+ .trig = TRIGGER(0x0afc, 11),
+};
+
+static struct peri_clk_data usb_ic_data = {
+ .gate = HW_SW_GATE(0x0354, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_96m",
+ "ref_96m"),
+ .div = FIXED_DIVIDER(2),
+ .sel = SELECTOR(0x0a24, 0, 2),
+ .trig = TRIGGER(0x0afc, 7),
+};
+
+/* also called usbh_48m */
+static struct peri_clk_data hsic2_48m_data = {
+ .gate = HW_SW_GATE(0x0370, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a38, 0, 2),
+ .div = FIXED_DIVIDER(2),
+ .trig = TRIGGER(0x0afc, 5),
+};
+
+/* also called usbh_12m */
+static struct peri_clk_data hsic2_12m_data = {
+ .gate = HW_SW_GATE(0x0370, 20, 4, 5),
+ .div = DIVIDER(0x0a38, 12, 2),
+ .clocks = CLOCKS("ref_crystal",
+ "var_96m",
+ "ref_96m"),
+ .pre_div = FIXED_DIVIDER(2),
+ .sel = SELECTOR(0x0a38, 0, 2),
+ .trig = TRIGGER(0x0afc, 5),
+};
+
+static struct ccu_data master_ccu_data = {
+ BCM281XX_CCU_COMMON(master, MASTER),
+ .kona_clks = {
+ [BCM281XX_MASTER_CCU_SDIO1] =
+ KONA_CLK(master, sdio1, peri),
+ [BCM281XX_MASTER_CCU_SDIO2] =
+ KONA_CLK(master, sdio2, peri),
+ [BCM281XX_MASTER_CCU_SDIO3] =
+ KONA_CLK(master, sdio3, peri),
+ [BCM281XX_MASTER_CCU_SDIO4] =
+ KONA_CLK(master, sdio4, peri),
+ [BCM281XX_MASTER_CCU_USB_IC] =
+ KONA_CLK(master, usb_ic, peri),
+ [BCM281XX_MASTER_CCU_HSIC2_48M] =
+ KONA_CLK(master, hsic2_48m, peri),
+ [BCM281XX_MASTER_CCU_HSIC2_12M] =
+ KONA_CLK(master, hsic2_12m, peri),
+ [BCM281XX_MASTER_CCU_CLOCK_COUNT] = LAST_KONA_CLK,
+ },
+};
+
+/* Slave CCU */
+
+static struct peri_clk_data uartb_data = {
+ .gate = HW_SW_GATE(0x0400, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_156m",
+ "ref_156m"),
+ .sel = SELECTOR(0x0a10, 0, 2),
+ .div = FRAC_DIVIDER(0x0a10, 4, 12, 8),
+ .trig = TRIGGER(0x0afc, 2),
+};
+
+static struct peri_clk_data uartb2_data = {
+ .gate = HW_SW_GATE(0x0404, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_156m",
+ "ref_156m"),
+ .sel = SELECTOR(0x0a14, 0, 2),
+ .div = FRAC_DIVIDER(0x0a14, 4, 12, 8),
+ .trig = TRIGGER(0x0afc, 3),
+};
+
+static struct peri_clk_data uartb3_data = {
+ .gate = HW_SW_GATE(0x0408, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_156m",
+ "ref_156m"),
+ .sel = SELECTOR(0x0a18, 0, 2),
+ .div = FRAC_DIVIDER(0x0a18, 4, 12, 8),
+ .trig = TRIGGER(0x0afc, 4),
+};
+
+static struct peri_clk_data uartb4_data = {
+ .gate = HW_SW_GATE(0x0408, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_156m",
+ "ref_156m"),
+ .sel = SELECTOR(0x0a1c, 0, 2),
+ .div = FRAC_DIVIDER(0x0a1c, 4, 12, 8),
+ .trig = TRIGGER(0x0afc, 5),
+};
+
+static struct peri_clk_data ssp0_data = {
+ .gate = HW_SW_GATE(0x0410, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_104m",
+ "ref_104m",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a20, 0, 3),
+ .div = DIVIDER(0x0a20, 4, 14),
+ .trig = TRIGGER(0x0afc, 6),
+};
+
+static struct peri_clk_data ssp2_data = {
+ .gate = HW_SW_GATE(0x0418, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_104m",
+ "ref_104m",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a28, 0, 3),
+ .div = DIVIDER(0x0a28, 4, 14),
+ .trig = TRIGGER(0x0afc, 8),
+};
+
+static struct peri_clk_data bsc1_data = {
+ .gate = HW_SW_GATE(0x0458, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_104m",
+ "ref_104m",
+ "var_13m",
+ "ref_13m"),
+ .sel = SELECTOR(0x0a64, 0, 3),
+ .trig = TRIGGER(0x0afc, 23),
+};
+
+static struct peri_clk_data bsc2_data = {
+ .gate = HW_SW_GATE(0x045c, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_104m",
+ "ref_104m",
+ "var_13m",
+ "ref_13m"),
+ .sel = SELECTOR(0x0a68, 0, 3),
+ .trig = TRIGGER(0x0afc, 24),
+};
+
+static struct peri_clk_data bsc3_data = {
+ .gate = HW_SW_GATE(0x0484, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_104m",
+ "ref_104m",
+ "var_13m",
+ "ref_13m"),
+ .sel = SELECTOR(0x0a84, 0, 3),
+ .trig = TRIGGER(0x0b00, 2),
+};
+
+static struct peri_clk_data pwm_data = {
+ .gate = HW_SW_GATE(0x0468, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_104m"),
+ .sel = SELECTOR(0x0a70, 0, 2),
+ .div = DIVIDER(0x0a70, 4, 3),
+ .trig = TRIGGER(0x0afc, 15),
+};
+
+static struct ccu_data slave_ccu_data = {
+ BCM281XX_CCU_COMMON(slave, SLAVE),
+ .kona_clks = {
+ [BCM281XX_SLAVE_CCU_UARTB] =
+ KONA_CLK(slave, uartb, peri),
+ [BCM281XX_SLAVE_CCU_UARTB2] =
+ KONA_CLK(slave, uartb2, peri),
+ [BCM281XX_SLAVE_CCU_UARTB3] =
+ KONA_CLK(slave, uartb3, peri),
+ [BCM281XX_SLAVE_CCU_UARTB4] =
+ KONA_CLK(slave, uartb4, peri),
+ [BCM281XX_SLAVE_CCU_SSP0] =
+ KONA_CLK(slave, ssp0, peri),
+ [BCM281XX_SLAVE_CCU_SSP2] =
+ KONA_CLK(slave, ssp2, peri),
+ [BCM281XX_SLAVE_CCU_BSC1] =
+ KONA_CLK(slave, bsc1, peri),
+ [BCM281XX_SLAVE_CCU_BSC2] =
+ KONA_CLK(slave, bsc2, peri),
+ [BCM281XX_SLAVE_CCU_BSC3] =
+ KONA_CLK(slave, bsc3, peri),
+ [BCM281XX_SLAVE_CCU_PWM] =
+ KONA_CLK(slave, pwm, peri),
+ [BCM281XX_SLAVE_CCU_CLOCK_COUNT] = LAST_KONA_CLK,
+ },
+};
+
+/* Device tree match table callback functions */
+
+static void __init kona_dt_root_ccu_setup(struct device_node *node)
+{
+ kona_dt_ccu_setup(&root_ccu_data, node);
+}
+
+static void __init kona_dt_aon_ccu_setup(struct device_node *node)
+{
+ kona_dt_ccu_setup(&aon_ccu_data, node);
+}
+
+static void __init kona_dt_hub_ccu_setup(struct device_node *node)
+{
+ kona_dt_ccu_setup(&hub_ccu_data, node);
+}
+
+static void __init kona_dt_master_ccu_setup(struct device_node *node)
+{
+ kona_dt_ccu_setup(&master_ccu_data, node);
+}
+
+static void __init kona_dt_slave_ccu_setup(struct device_node *node)
+{
+ kona_dt_ccu_setup(&slave_ccu_data, node);
+}
+
+CLK_OF_DECLARE(bcm281xx_root_ccu, BCM281XX_DT_ROOT_CCU_COMPAT,
+ kona_dt_root_ccu_setup);
+CLK_OF_DECLARE(bcm281xx_aon_ccu, BCM281XX_DT_AON_CCU_COMPAT,
+ kona_dt_aon_ccu_setup);
+CLK_OF_DECLARE(bcm281xx_hub_ccu, BCM281XX_DT_HUB_CCU_COMPAT,
+ kona_dt_hub_ccu_setup);
+CLK_OF_DECLARE(bcm281xx_master_ccu, BCM281XX_DT_MASTER_CCU_COMPAT,
+ kona_dt_master_ccu_setup);
+CLK_OF_DECLARE(bcm281xx_slave_ccu, BCM281XX_DT_SLAVE_CCU_COMPAT,
+ kona_dt_slave_ccu_setup);
diff --git a/drivers/clk/bcm/clk-kona-setup.c b/drivers/clk/bcm/clk-kona-setup.c
new file mode 100644
index 00000000000..e5aededdd32
--- /dev/null
+++ b/drivers/clk/bcm/clk-kona-setup.c
@@ -0,0 +1,877 @@
+/*
+ * Copyright (C) 2013 Broadcom Corporation
+ * Copyright 2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of_address.h>
+
+#include "clk-kona.h"
+
+/* These are used when a selector or trigger is found to be unneeded */
+#define selector_clear_exists(sel) ((sel)->width = 0)
+#define trigger_clear_exists(trig) FLAG_CLEAR(trig, TRIG, EXISTS)
+
+LIST_HEAD(ccu_list); /* The list of set up CCUs */
+
+/* Validity checking */
+
+static bool ccu_data_offsets_valid(struct ccu_data *ccu)
+{
+ struct ccu_policy *ccu_policy = &ccu->policy;
+ u32 limit;
+
+ limit = ccu->range - sizeof(u32);
+ limit = round_down(limit, sizeof(u32));
+ if (ccu_policy_exists(ccu_policy)) {
+ if (ccu_policy->enable.offset > limit) {
+ pr_err("%s: bad policy enable offset for %s "
+ "(%u > %u)\n", __func__,
+ ccu->name, ccu_policy->enable.offset, limit);
+ return false;
+ }
+ if (ccu_policy->control.offset > limit) {
+ pr_err("%s: bad policy control offset for %s "
+ "(%u > %u)\n", __func__,
+ ccu->name, ccu_policy->control.offset, limit);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool clk_requires_trigger(struct kona_clk *bcm_clk)
+{
+ struct peri_clk_data *peri = bcm_clk->u.peri;
+ struct bcm_clk_sel *sel;
+ struct bcm_clk_div *div;
+
+ if (bcm_clk->type != bcm_clk_peri)
+ return false;
+
+ sel = &peri->sel;
+ if (sel->parent_count && selector_exists(sel))
+ return true;
+
+ div = &peri->div;
+ if (!divider_exists(div))
+ return false;
+
+ /* Fixed dividers don't need triggers */
+ if (!divider_is_fixed(div))
+ return true;
+
+ div = &peri->pre_div;
+
+ return divider_exists(div) && !divider_is_fixed(div);
+}
+
+static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk)
+{
+ struct peri_clk_data *peri;
+ struct bcm_clk_policy *policy;
+ struct bcm_clk_gate *gate;
+ struct bcm_clk_hyst *hyst;
+ struct bcm_clk_div *div;
+ struct bcm_clk_sel *sel;
+ struct bcm_clk_trig *trig;
+ const char *name;
+ u32 range;
+ u32 limit;
+
+ BUG_ON(bcm_clk->type != bcm_clk_peri);
+ peri = bcm_clk->u.peri;
+ name = bcm_clk->init_data.name;
+ range = bcm_clk->ccu->range;
+
+ limit = range - sizeof(u32);
+ limit = round_down(limit, sizeof(u32));
+
+ policy = &peri->policy;
+ if (policy_exists(policy)) {
+ if (policy->offset > limit) {
+ pr_err("%s: bad policy offset for %s (%u > %u)\n",
+ __func__, name, policy->offset, limit);
+ return false;
+ }
+ }
+
+ gate = &peri->gate;
+ hyst = &peri->hyst;
+ if (gate_exists(gate)) {
+ if (gate->offset > limit) {
+ pr_err("%s: bad gate offset for %s (%u > %u)\n",
+ __func__, name, gate->offset, limit);
+ return false;
+ }
+
+ if (hyst_exists(hyst)) {
+ if (hyst->offset > limit) {
+ pr_err("%s: bad hysteresis offset for %s "
+ "(%u > %u)\n", __func__,
+ name, hyst->offset, limit);
+ return false;
+ }
+ }
+ } else if (hyst_exists(hyst)) {
+ pr_err("%s: hysteresis but no gate for %s\n", __func__, name);
+ return false;
+ }
+
+ div = &peri->div;
+ if (divider_exists(div)) {
+ if (div->u.s.offset > limit) {
+ pr_err("%s: bad divider offset for %s (%u > %u)\n",
+ __func__, name, div->u.s.offset, limit);
+ return false;
+ }
+ }
+
+ div = &peri->pre_div;
+ if (divider_exists(div)) {
+ if (div->u.s.offset > limit) {
+ pr_err("%s: bad pre-divider offset for %s "
+ "(%u > %u)\n",
+ __func__, name, div->u.s.offset, limit);
+ return false;
+ }
+ }
+
+ sel = &peri->sel;
+ if (selector_exists(sel)) {
+ if (sel->offset > limit) {
+ pr_err("%s: bad selector offset for %s (%u > %u)\n",
+ __func__, name, sel->offset, limit);
+ return false;
+ }
+ }
+
+ trig = &peri->trig;
+ if (trigger_exists(trig)) {
+ if (trig->offset > limit) {
+ pr_err("%s: bad trigger offset for %s (%u > %u)\n",
+ __func__, name, trig->offset, limit);
+ return false;
+ }
+ }
+
+ trig = &peri->pre_trig;
+ if (trigger_exists(trig)) {
+ if (trig->offset > limit) {
+ pr_err("%s: bad pre-trigger offset for %s (%u > %u)\n",
+ __func__, name, trig->offset, limit);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* A bit position must be less than the number of bits in a 32-bit register. */
+static bool bit_posn_valid(u32 bit_posn, const char *field_name,
+ const char *clock_name)
+{
+ u32 limit = BITS_PER_BYTE * sizeof(u32) - 1;
+
+ if (bit_posn > limit) {
+ pr_err("%s: bad %s bit for %s (%u > %u)\n", __func__,
+ field_name, clock_name, bit_posn, limit);
+ return false;
+ }
+ return true;
+}
+
+/*
+ * A bitfield must be at least 1 bit wide. Both the low-order and
+ * high-order bits must lie within a 32-bit register. We require
+ * fields to be less than 32 bits wide, mainly because we use
+ * shifting to produce field masks, and shifting a full word width
+ * is not well-defined by the C standard.
+ */
+static bool bitfield_valid(u32 shift, u32 width, const char *field_name,
+ const char *clock_name)
+{
+ u32 limit = BITS_PER_BYTE * sizeof(u32);
+
+ if (!width) {
+ pr_err("%s: bad %s field width 0 for %s\n", __func__,
+ field_name, clock_name);
+ return false;
+ }
+ if (shift + width > limit) {
+ pr_err("%s: bad %s for %s (%u + %u > %u)\n", __func__,
+ field_name, clock_name, shift, width, limit);
+ return false;
+ }
+ return true;
+}
+
+static bool
+ccu_policy_valid(struct ccu_policy *ccu_policy, const char *ccu_name)
+{
+ struct bcm_lvm_en *enable = &ccu_policy->enable;
+ struct bcm_policy_ctl *control;
+
+ if (!bit_posn_valid(enable->bit, "policy enable", ccu_name))
+ return false;
+
+ control = &ccu_policy->control;
+ if (!bit_posn_valid(control->go_bit, "policy control GO", ccu_name))
+ return false;
+
+ if (!bit_posn_valid(control->atl_bit, "policy control ATL", ccu_name))
+ return false;
+
+ if (!bit_posn_valid(control->ac_bit, "policy control AC", ccu_name))
+ return false;
+
+ return true;
+}
+
+static bool policy_valid(struct bcm_clk_policy *policy, const char *clock_name)
+{
+ if (!bit_posn_valid(policy->bit, "policy", clock_name))
+ return false;
+
+ return true;
+}
+
+/*
+ * All gates, if defined, have a status bit, and for hardware-only
+ * gates, that's it. Gates that can be software controlled also
+ * have an enable bit. And a gate that can be hardware or software
+ * controlled will have a hardware/software select bit.
+ */
+static bool gate_valid(struct bcm_clk_gate *gate, const char *field_name,
+ const char *clock_name)
+{
+ if (!bit_posn_valid(gate->status_bit, "gate status", clock_name))
+ return false;
+
+ if (gate_is_sw_controllable(gate)) {
+ if (!bit_posn_valid(gate->en_bit, "gate enable", clock_name))
+ return false;
+
+ if (gate_is_hw_controllable(gate)) {
+ if (!bit_posn_valid(gate->hw_sw_sel_bit,
+ "gate hw/sw select",
+ clock_name))
+ return false;
+ }
+ } else {
+ BUG_ON(!gate_is_hw_controllable(gate));
+ }
+
+ return true;
+}
+
+static bool hyst_valid(struct bcm_clk_hyst *hyst, const char *clock_name)
+{
+ if (!bit_posn_valid(hyst->en_bit, "hysteresis enable", clock_name))
+ return false;
+
+ if (!bit_posn_valid(hyst->val_bit, "hysteresis value", clock_name))
+ return false;
+
+ return true;
+}
+
+/*
+ * A selector bitfield must be valid. Its parent_sel array must
+ * also be reasonable for the field.
+ */
+static bool sel_valid(struct bcm_clk_sel *sel, const char *field_name,
+ const char *clock_name)
+{
+ if (!bitfield_valid(sel->shift, sel->width, field_name, clock_name))
+ return false;
+
+ if (sel->parent_count) {
+ u32 max_sel;
+ u32 limit;
+
+ /*
+ * Make sure the selector field can hold all the
+ * selector values we expect to be able to use. A
+ * clock only needs to have a selector defined if it
+ * has more than one parent. And in that case the
+ * highest selector value will be in the last entry
+ * in the array.
+ */
+ max_sel = sel->parent_sel[sel->parent_count - 1];
+ limit = (1 << sel->width) - 1;
+ if (max_sel > limit) {
+ pr_err("%s: bad selector for %s "
+ "(%u needs > %u bits)\n",
+ __func__, clock_name, max_sel,
+ sel->width);
+ return false;
+ }
+ } else {
+ pr_warn("%s: ignoring selector for %s (no parents)\n",
+ __func__, clock_name);
+ selector_clear_exists(sel);
+ kfree(sel->parent_sel);
+ sel->parent_sel = NULL;
+ }
+
+ return true;
+}
+
+/*
+ * A fixed divider just needs to be non-zero. A variable divider
+ * has to have a valid divider bitfield, and if it has a fraction,
+ * the width of the fraction must be no more than the width of
+ * the divider as a whole.
+ */
+static bool div_valid(struct bcm_clk_div *div, const char *field_name,
+ const char *clock_name)
+{
+ if (divider_is_fixed(div)) {
+ /* Any fixed divider value but 0 is OK */
+ if (div->u.fixed == 0) {
+ pr_err("%s: bad %s fixed value 0 for %s\n", __func__,
+ field_name, clock_name);
+ return false;
+ }
+ return true;
+ }
+ if (!bitfield_valid(div->u.s.shift, div->u.s.width,
+ field_name, clock_name))
+ return false;
+
+ if (divider_has_fraction(div))
+ if (div->u.s.frac_width > div->u.s.width) {
+ pr_warn("%s: bad %s fraction width for %s (%u > %u)\n",
+ __func__, field_name, clock_name,
+ div->u.s.frac_width, div->u.s.width);
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * If a clock has two dividers, the combined number of fractional
+ * bits must be representable in a 32-bit unsigned value. This
+ * is because we scale up a dividend using both dividers before
+ * dividing to improve accuracy, and we need to avoid overflow.
+ */
+static bool kona_dividers_valid(struct kona_clk *bcm_clk)
+{
+ struct peri_clk_data *peri = bcm_clk->u.peri;
+ struct bcm_clk_div *div;
+ struct bcm_clk_div *pre_div;
+ u32 limit;
+
+ BUG_ON(bcm_clk->type != bcm_clk_peri);
+
+ if (!divider_exists(&peri->div) || !divider_exists(&peri->pre_div))
+ return true;
+
+ div = &peri->div;
+ pre_div = &peri->pre_div;
+ if (divider_is_fixed(div) || divider_is_fixed(pre_div))
+ return true;
+
+ limit = BITS_PER_BYTE * sizeof(u32);
+
+ return div->u.s.frac_width + pre_div->u.s.frac_width <= limit;
+}
+
+
+/* A trigger just needs to represent a valid bit position */
+static bool trig_valid(struct bcm_clk_trig *trig, const char *field_name,
+ const char *clock_name)
+{
+ return bit_posn_valid(trig->bit, field_name, clock_name);
+}
+
+/* Determine whether the set of peripheral clock registers are valid. */
+static bool
+peri_clk_data_valid(struct kona_clk *bcm_clk)
+{
+ struct peri_clk_data *peri;
+ struct bcm_clk_policy *policy;
+ struct bcm_clk_gate *gate;
+ struct bcm_clk_hyst *hyst;
+ struct bcm_clk_sel *sel;
+ struct bcm_clk_div *div;
+ struct bcm_clk_div *pre_div;
+ struct bcm_clk_trig *trig;
+ const char *name;
+
+ BUG_ON(bcm_clk->type != bcm_clk_peri);
+
+ /*
+ * First validate register offsets. This is the only place
+ * where we need something from the ccu, so we do these
+ * together.
+ */
+ if (!peri_clk_data_offsets_valid(bcm_clk))
+ return false;
+
+ peri = bcm_clk->u.peri;
+ name = bcm_clk->init_data.name;
+
+ policy = &peri->policy;
+ if (policy_exists(policy) && !policy_valid(policy, name))
+ return false;
+
+ gate = &peri->gate;
+ if (gate_exists(gate) && !gate_valid(gate, "gate", name))
+ return false;
+
+ hyst = &peri->hyst;
+ if (hyst_exists(hyst) && !hyst_valid(hyst, name))
+ return false;
+
+ sel = &peri->sel;
+ if (selector_exists(sel)) {
+ if (!sel_valid(sel, "selector", name))
+ return false;
+
+ } else if (sel->parent_count > 1) {
+ pr_err("%s: multiple parents but no selector for %s\n",
+ __func__, name);
+
+ return false;
+ }
+
+ div = &peri->div;
+ pre_div = &peri->pre_div;
+ if (divider_exists(div)) {
+ if (!div_valid(div, "divider", name))
+ return false;
+
+ if (divider_exists(pre_div))
+ if (!div_valid(pre_div, "pre-divider", name))
+ return false;
+ } else if (divider_exists(pre_div)) {
+ pr_err("%s: pre-divider but no divider for %s\n", __func__,
+ name);
+ return false;
+ }
+
+ trig = &peri->trig;
+ if (trigger_exists(trig)) {
+ if (!trig_valid(trig, "trigger", name))
+ return false;
+
+ if (trigger_exists(&peri->pre_trig)) {
+			if (!trig_valid(&peri->pre_trig, "pre-trigger", name)) {
+ return false;
+ }
+ }
+ if (!clk_requires_trigger(bcm_clk)) {
+ pr_warn("%s: ignoring trigger for %s (not needed)\n",
+ __func__, name);
+ trigger_clear_exists(trig);
+ }
+ } else if (trigger_exists(&peri->pre_trig)) {
+ pr_err("%s: pre-trigger but no trigger for %s\n", __func__,
+ name);
+ return false;
+ } else if (clk_requires_trigger(bcm_clk)) {
+ pr_err("%s: required trigger missing for %s\n", __func__,
+ name);
+ return false;
+ }
+
+ return kona_dividers_valid(bcm_clk);
+}
+
+static bool kona_clk_valid(struct kona_clk *bcm_clk)
+{
+ switch (bcm_clk->type) {
+ case bcm_clk_peri:
+ if (!peri_clk_data_valid(bcm_clk))
+ return false;
+ break;
+ default:
+ pr_err("%s: unrecognized clock type (%d)\n", __func__,
+ (int)bcm_clk->type);
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Scan an array of parent clock names to determine whether there
+ * are any entries containing BAD_CLK_NAME. Such entries are
+ * placeholders for non-supported clocks. Keep track of the
+ * position of each clock name in the original array.
+ *
+ * Allocates an array of pointers to hold the names of all
+ * non-null entries in the original array, and returns a pointer to
+ * that array in *names. This will be used for registering the
+ * clock with the common clock code. On successful return,
+ * *count indicates how many entries are in that names array.
+ *
+ * If there is more than one entry in the resulting names array,
+ * another array is allocated to record the parent selector value
+ * for each (defined) parent clock. This is the value that
+ * represents this parent clock in the clock's source selector
+ * register. The position of the clock in the original parent array
+ * defines that selector value. The number of entries in this array
+ * is the same as the number of entries in the parent names array.
+ *
+ * The array of selector values is returned. If the clock has no
+ * parents, no selector is required and a null pointer is returned.
+ *
+ * Returns a null pointer if the clock names array supplied was
+ * null. (This is not an error.)
+ *
+ * Returns a pointer-coded error if an error occurs.
+ */
+static u32 *parent_process(const char *clocks[],
+ u32 *count, const char ***names)
+{
+ static const char **parent_names;
+ static u32 *parent_sel;
+ const char **clock;
+ u32 parent_count;
+ u32 bad_count = 0;
+ u32 orig_count;
+ u32 i;
+ u32 j;
+
+ *count = 0; /* In case of early return */
+ *names = NULL;
+ if (!clocks)
+ return NULL;
+
+ /*
+ * Count the number of names in the null-terminated array,
+ * and find out how many of those are actually clock names.
+ */
+ for (clock = clocks; *clock; clock++)
+ if (*clock == BAD_CLK_NAME)
+ bad_count++;
+ orig_count = (u32)(clock - clocks);
+ parent_count = orig_count - bad_count;
+
+ /* If all clocks are unsupported, we treat it as no clock */
+ if (!parent_count)
+ return NULL;
+
+ /* Avoid exceeding our parent clock limit */
+ if (parent_count > PARENT_COUNT_MAX) {
+ pr_err("%s: too many parents (%u > %u)\n", __func__,
+ parent_count, PARENT_COUNT_MAX);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * There is one parent name for each defined parent clock.
+ * We also maintain an array containing the selector value
+ * for each defined clock. If there's only one clock, the
+ * selector is not required, but we allocate space for the
+ * array anyway to keep things simple.
+ */
+	parent_names = kmalloc(parent_count * sizeof(*parent_names), GFP_KERNEL);
+ if (!parent_names) {
+ pr_err("%s: error allocating %u parent names\n", __func__,
+ parent_count);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* There is at least one parent, so allocate a selector array */
+
+ parent_sel = kmalloc(parent_count * sizeof(*parent_sel), GFP_KERNEL);
+ if (!parent_sel) {
+ pr_err("%s: error allocating %u parent selectors\n", __func__,
+ parent_count);
+ kfree(parent_names);
+
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Now fill in the parent names and selector arrays */
+ for (i = 0, j = 0; i < orig_count; i++) {
+ if (clocks[i] != BAD_CLK_NAME) {
+ parent_names[j] = clocks[i];
+ parent_sel[j] = i;
+ j++;
+ }
+ }
+ *names = parent_names;
+ *count = parent_count;
+
+ return parent_sel;
+}
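
The block comment above describes how a parents list containing BAD_CLK_NAME placeholders is compacted into a names array plus a matching selector-value array. A worked example with illustrative clock names:

/* Illustrative input; only the shape matters, not the names. */
static const char *example_clocks[] = {
	"ref_crystal", BAD_CLK_NAME, "var_96m", NULL
};

/*
 * parent_process(example_clocks, &count, &names) would report
 * count == 2 with names == { "ref_crystal", "var_96m" }, and return
 * parent_sel == { 0, 2 }: the clock framework sees two parents while
 * the hardware selector field is still programmed with the original
 * positions 0 and 2.
 */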
+
+static int
+clk_sel_setup(const char **clocks, struct bcm_clk_sel *sel,
+ struct clk_init_data *init_data)
+{
+ const char **parent_names = NULL;
+ u32 parent_count = 0;
+ u32 *parent_sel;
+
+ /*
+ * If a peripheral clock has multiple parents, the value
+ * used by the hardware to select that parent is represented
+ * by the parent clock's position in the "clocks" list. Some
+ * values don't have defined or supported clocks; these will
+ * have BAD_CLK_NAME entries in the parents[] array. The
+ * list is terminated by a NULL entry.
+ *
+ * We need to supply (only) the names of defined parent
+ * clocks when registering a clock though, so we use an
+ * array of parent selector values to map between the
+ * indexes the common clock code uses and the selector
+ * values we need.
+ */
+ parent_sel = parent_process(clocks, &parent_count, &parent_names);
+ if (IS_ERR(parent_sel)) {
+ int ret = PTR_ERR(parent_sel);
+
+ pr_err("%s: error processing parent clocks for %s (%d)\n",
+ __func__, init_data->name, ret);
+
+ return ret;
+ }
+
+ init_data->parent_names = parent_names;
+ init_data->num_parents = parent_count;
+
+ sel->parent_count = parent_count;
+ sel->parent_sel = parent_sel;
+
+ return 0;
+}
+
+static void clk_sel_teardown(struct bcm_clk_sel *sel,
+ struct clk_init_data *init_data)
+{
+ kfree(sel->parent_sel);
+ sel->parent_sel = NULL;
+ sel->parent_count = 0;
+
+ init_data->num_parents = 0;
+ kfree(init_data->parent_names);
+ init_data->parent_names = NULL;
+}
+
+static void peri_clk_teardown(struct peri_clk_data *data,
+ struct clk_init_data *init_data)
+{
+ clk_sel_teardown(&data->sel, init_data);
+}
+
+/*
+ * Caller is responsible for freeing the parent_names[] and
+ * parent_sel[] arrays in the peripheral clock's "data" structure,
+ * which are assigned here if the clock has one or more parent
+ * clocks associated with it.
+ */
+static int
+peri_clk_setup(struct peri_clk_data *data, struct clk_init_data *init_data)
+{
+ init_data->flags = CLK_IGNORE_UNUSED;
+
+ return clk_sel_setup(data->clocks, &data->sel, init_data);
+}
+
+static void bcm_clk_teardown(struct kona_clk *bcm_clk)
+{
+ switch (bcm_clk->type) {
+ case bcm_clk_peri:
+ peri_clk_teardown(bcm_clk->u.data, &bcm_clk->init_data);
+ break;
+ default:
+ break;
+ }
+ bcm_clk->u.data = NULL;
+ bcm_clk->type = bcm_clk_none;
+}
+
+static void kona_clk_teardown(struct clk *clk)
+{
+ struct clk_hw *hw;
+ struct kona_clk *bcm_clk;
+
+ if (!clk)
+ return;
+
+ hw = __clk_get_hw(clk);
+ if (!hw) {
+ pr_err("%s: clk %p has null hw pointer\n", __func__, clk);
+ return;
+ }
+ clk_unregister(clk);
+
+ bcm_clk = to_kona_clk(hw);
+ bcm_clk_teardown(bcm_clk);
+}
+
+struct clk *kona_clk_setup(struct kona_clk *bcm_clk)
+{
+ struct clk_init_data *init_data = &bcm_clk->init_data;
+ struct clk *clk = NULL;
+
+ switch (bcm_clk->type) {
+ case bcm_clk_peri:
+ if (peri_clk_setup(bcm_clk->u.data, init_data))
+ return NULL;
+ break;
+ default:
+ pr_err("%s: clock type %d invalid for %s\n", __func__,
+ (int)bcm_clk->type, init_data->name);
+ return NULL;
+ }
+
+ /* Make sure everything makes sense before we set it up */
+ if (!kona_clk_valid(bcm_clk)) {
+ pr_err("%s: clock data invalid for %s\n", __func__,
+ init_data->name);
+ goto out_teardown;
+ }
+
+ bcm_clk->hw.init = init_data;
+ clk = clk_register(NULL, &bcm_clk->hw);
+ if (IS_ERR(clk)) {
+ pr_err("%s: error registering clock %s (%ld)\n", __func__,
+ init_data->name, PTR_ERR(clk));
+ goto out_teardown;
+ }
+ BUG_ON(!clk);
+
+ return clk;
+out_teardown:
+ bcm_clk_teardown(bcm_clk);
+
+ return NULL;
+}
+
+static void ccu_clks_teardown(struct ccu_data *ccu)
+{
+ u32 i;
+
+ for (i = 0; i < ccu->clk_data.clk_num; i++)
+ kona_clk_teardown(ccu->clk_data.clks[i]);
+ kfree(ccu->clk_data.clks);
+}
+
+static void kona_ccu_teardown(struct ccu_data *ccu)
+{
+ kfree(ccu->clk_data.clks);
+ ccu->clk_data.clks = NULL;
+ if (!ccu->base)
+ return;
+
+ of_clk_del_provider(ccu->node); /* safe if never added */
+ ccu_clks_teardown(ccu);
+ list_del(&ccu->links);
+ of_node_put(ccu->node);
+ ccu->node = NULL;
+ iounmap(ccu->base);
+ ccu->base = NULL;
+}
+
+static bool ccu_data_valid(struct ccu_data *ccu)
+{
+ struct ccu_policy *ccu_policy;
+
+ if (!ccu_data_offsets_valid(ccu))
+ return false;
+
+ ccu_policy = &ccu->policy;
+ if (ccu_policy_exists(ccu_policy))
+ if (!ccu_policy_valid(ccu_policy, ccu->name))
+ return false;
+
+ return true;
+}
+
+/*
+ * Set up a CCU. Set up each of the clocks the CCU provides and
+ * register the CCU as a clock provider for its device tree node.
+ */
+void __init kona_dt_ccu_setup(struct ccu_data *ccu,
+ struct device_node *node)
+{
+ struct resource res = { 0 };
+ resource_size_t range;
+ unsigned int i;
+ int ret;
+
+ if (ccu->clk_data.clk_num) {
+ size_t size;
+
+ size = ccu->clk_data.clk_num * sizeof(*ccu->clk_data.clks);
+ ccu->clk_data.clks = kzalloc(size, GFP_KERNEL);
+ if (!ccu->clk_data.clks) {
+ pr_err("%s: unable to allocate %u clocks for %s\n",
+ __func__, ccu->clk_data.clk_num, node->name);
+ return;
+ }
+ }
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ pr_err("%s: no valid CCU registers found for %s\n", __func__,
+ node->name);
+ goto out_err;
+ }
+
+ range = resource_size(&res);
+ if (range > (resource_size_t)U32_MAX) {
+ pr_err("%s: address range too large for %s\n", __func__,
+ node->name);
+ goto out_err;
+ }
+
+ ccu->range = (u32)range;
+
+ if (!ccu_data_valid(ccu)) {
+ pr_err("%s: ccu data not valid for %s\n", __func__, node->name);
+ goto out_err;
+ }
+
+ ccu->base = ioremap(res.start, ccu->range);
+ if (!ccu->base) {
+ pr_err("%s: unable to map CCU registers for %s\n", __func__,
+ node->name);
+ goto out_err;
+ }
+ ccu->node = of_node_get(node);
+ list_add_tail(&ccu->links, &ccu_list);
+
+ /*
+ * Set up each defined kona clock and save the result in
+ * the clock framework clock array (in ccu->data). Then
+ * register as a provider for these clocks.
+ */
+ for (i = 0; i < ccu->clk_data.clk_num; i++) {
+ if (!ccu->kona_clks[i].ccu)
+ continue;
+ ccu->clk_data.clks[i] = kona_clk_setup(&ccu->kona_clks[i]);
+ }
+
+ ret = of_clk_add_provider(node, of_clk_src_onecell_get, &ccu->clk_data);
+ if (ret) {
+ pr_err("%s: error adding ccu %s as provider (%d)\n", __func__,
+ node->name, ret);
+ goto out_err;
+ }
+
+ if (!kona_ccu_init(ccu))
+ pr_err("Broadcom %s initialization had errors\n", node->name);
+
+ return;
+out_err:
+ kona_ccu_teardown(ccu);
+ pr_err("Broadcom %s setup aborted\n", node->name);
+}
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
new file mode 100644
index 00000000000..95af2e665dd
--- /dev/null
+++ b/drivers/clk/bcm/clk-kona.c
@@ -0,0 +1,1281 @@
+/*
+ * Copyright (C) 2013 Broadcom Corporation
+ * Copyright 2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "clk-kona.h"
+
+#include <linux/delay.h>
+
+/*
+ * "Policies" affect the frequencies of bus clocks provided by a
+ * CCU. (I believe these policies are named "Deep Sleep", "Economy",
+ * "Normal", and "Turbo".) A lower policy number has lower power
+ * consumption, and policy 2 is the default.
+ */
+#define CCU_POLICY_COUNT 4
+
+#define CCU_ACCESS_PASSWORD 0xA5A500
+#define CLK_GATE_DELAY_LOOP 2000
+
+/* Bitfield operations */
+
+/* Produces a mask of set bits covering a range of a 32-bit value */
+static inline u32 bitfield_mask(u32 shift, u32 width)
+{
+ return ((1 << width) - 1) << shift;
+}
+
+/* Extract the value of a bitfield found within a given register value */
+static inline u32 bitfield_extract(u32 reg_val, u32 shift, u32 width)
+{
+ return (reg_val & bitfield_mask(shift, width)) >> shift;
+}
+
+/* Replace the value of a bitfield found within a given register value */
+static inline u32 bitfield_replace(u32 reg_val, u32 shift, u32 width, u32 val)
+{
+ u32 mask = bitfield_mask(shift, width);
+
+ return (reg_val & ~mask) | (val << shift);
+}
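+
+/*
+ * As a worked example (using an arbitrary register value rather than
+ * one taken from real hardware), for a 4-bit field at shift 8:
+ *
+ *	bitfield_extract(0x12345678, 8, 4)	yields 0x6
+ *	bitfield_replace(0x12345678, 8, 4, 0xa)	yields 0x12345a78
+ *
+ * Note that bitfield_replace() does not mask the new value, so the
+ * caller must ensure it fits within the field width.
+ */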
+
+/* Divider and scaling helpers */
+
+/*
+ * Implement DIV_ROUND_CLOSEST() for 64-bit dividend and both values
+ * unsigned. Note that unlike do_div(), the remainder is discarded
+ * and the return value is the quotient (not the remainder).
+ */
+u64 do_div_round_closest(u64 dividend, unsigned long divisor)
+{
+ u64 result;
+
+ result = dividend + ((u64)divisor >> 1);
+ (void)do_div(result, divisor);
+
+ return result;
+}
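+
+/*
+ * For example, do_div_round_closest(10, 4) yields 3 (2.5 rounds up),
+ * while do_div_round_closest(9, 4) yields 2 (2.25 rounds down).
+ */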
+
+/* Convert a divider into the scaled divisor value it represents. */
+static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
+{
+ return (u64)reg_div + ((u64)1 << div->u.s.frac_width);
+}
+
+/*
+ * Build a scaled divider value as close as possible to the
+ * given whole part (div_value) and fractional part (expressed
+ * in billionths).
+ */
+u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
+{
+ u64 combined;
+
+ BUG_ON(!div_value);
+ BUG_ON(billionths >= BILLION);
+
+ combined = (u64)div_value * BILLION + billionths;
+ combined <<= div->u.s.frac_width;
+
+ return do_div_round_closest(combined, BILLION);
+}
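+
+/*
+ * As an illustration with hypothetical values: for a divider whose
+ * frac_width is 3, scaled_div_build(div, 2, 500000000) represents the
+ * divisor 2.5 and evaluates to 2.5 * (1 << 3) = 20.
+ */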
+
+/* The scaled minimum divisor representable by a divider */
+static inline u64
+scaled_div_min(struct bcm_clk_div *div)
+{
+ if (divider_is_fixed(div))
+ return (u64)div->u.fixed;
+
+ return scaled_div_value(div, 0);
+}
+
+/* The scaled maximum divisor representable by a divider */
+u64 scaled_div_max(struct bcm_clk_div *div)
+{
+ u32 reg_div;
+
+ if (divider_is_fixed(div))
+ return (u64)div->u.fixed;
+
+ reg_div = ((u32)1 << div->u.s.width) - 1;
+
+ return scaled_div_value(div, reg_div);
+}
+
+/*
+ * Convert a scaled divisor into its divider representation as
+ * stored in a divider register field.
+ */
+static inline u32
+divider(struct bcm_clk_div *div, u64 scaled_div)
+{
+ BUG_ON(scaled_div < scaled_div_min(div));
+ BUG_ON(scaled_div > scaled_div_max(div));
+
+ return (u32)(scaled_div - ((u64)1 << div->u.s.frac_width));
+}
+
+/* Return a rate scaled for use when dividing by a scaled divisor. */
+static inline u64
+scale_rate(struct bcm_clk_div *div, u32 rate)
+{
+ if (divider_is_fixed(div))
+ return (u64)rate;
+
+ return (u64)rate << div->u.s.frac_width;
+}
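+
+/*
+ * Tying the scaling helpers together with hypothetical numbers: for a
+ * divider with frac_width 3, a register field value of 12 represents
+ * the scaled divisor 12 + (1 << 3) = 20, i.e. the divisor 2.5.  A rate
+ * of 1000 scales to 1000 << 3 = 8000, and 8000 / 20 = 400, which is
+ * 1000 / 2.5 as expected.
+ */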
+
+/* CCU access */
+
+/* Read a 32-bit register value from a CCU's address space. */
+static inline u32 __ccu_read(struct ccu_data *ccu, u32 reg_offset)
+{
+ return readl(ccu->base + reg_offset);
+}
+
+/* Write a 32-bit register value into a CCU's address space. */
+static inline void
+__ccu_write(struct ccu_data *ccu, u32 reg_offset, u32 reg_val)
+{
+ writel(reg_val, ccu->base + reg_offset);
+}
+
+static inline unsigned long ccu_lock(struct ccu_data *ccu)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ccu->lock, flags);
+
+ return flags;
+}
+static inline void ccu_unlock(struct ccu_data *ccu, unsigned long flags)
+{
+ spin_unlock_irqrestore(&ccu->lock, flags);
+}
+
+/*
+ * Enable/disable write access to CCU protected registers. The
+ * WR_ACCESS register for all CCUs is at offset 0.
+ */
+static inline void __ccu_write_enable(struct ccu_data *ccu)
+{
+ if (ccu->write_enabled) {
+ pr_err("%s: access already enabled for %s\n", __func__,
+ ccu->name);
+ return;
+ }
+ ccu->write_enabled = true;
+ __ccu_write(ccu, 0, CCU_ACCESS_PASSWORD | 1);
+}
+
+static inline void __ccu_write_disable(struct ccu_data *ccu)
+{
+ if (!ccu->write_enabled) {
+ pr_err("%s: access wasn't enabled for %s\n", __func__,
+ ccu->name);
+ return;
+ }
+
+ __ccu_write(ccu, 0, CCU_ACCESS_PASSWORD);
+ ccu->write_enabled = false;
+}
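+
+/*
+ * In other words, writing 0xA5A501 to offset 0 unlocks the CCU's
+ * protected registers, and writing 0xA5A500 locks them again.
+ */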
+
+/*
+ * Poll a register in a CCU's address space, returning when the
+ * specified bit in that register's value is set (or clear). Delay
+ * a microsecond after each read of the register. Returns true if
+ * successful, or false if we gave up trying.
+ *
+ * Caller must ensure the CCU lock is held.
+ */
+static inline bool
+__ccu_wait_bit(struct ccu_data *ccu, u32 reg_offset, u32 bit, bool want)
+{
+ unsigned int tries;
+ u32 bit_mask = 1 << bit;
+
+ for (tries = 0; tries < CLK_GATE_DELAY_LOOP; tries++) {
+ u32 val;
+ bool bit_val;
+
+ val = __ccu_read(ccu, reg_offset);
+ bit_val = (val & bit_mask) != 0;
+ if (bit_val == want)
+ return true;
+ udelay(1);
+ }
+ pr_warn("%s: %s/0x%04x bit %u was never %s\n", __func__,
+ ccu->name, reg_offset, bit, want ? "set" : "clear");
+
+ return false;
+}
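+
+/*
+ * With CLK_GATE_DELAY_LOOP set to 2000 and a udelay(1) per iteration,
+ * the polling above gives up after roughly 2ms (plus register access
+ * time).
+ */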
+
+/* Policy operations */
+
+static bool __ccu_policy_engine_start(struct ccu_data *ccu, bool sync)
+{
+ struct bcm_policy_ctl *control = &ccu->policy.control;
+ u32 offset;
+ u32 go_bit;
+ u32 mask;
+ bool ret;
+
+ /* If we don't need to control policy for this CCU, we're done. */
+ if (!policy_ctl_exists(control))
+ return true;
+
+ offset = control->offset;
+ go_bit = control->go_bit;
+
+ /* Ensure we're not busy before we start */
+ ret = __ccu_wait_bit(ccu, offset, go_bit, false);
+ if (!ret) {
+ pr_err("%s: ccu %s policy engine wouldn't go idle\n",
+ __func__, ccu->name);
+ return false;
+ }
+
+ /*
+ * If it's a synchronous request, we'll wait for the voltage
+ * and frequency of the active load to stabilize before
+ * returning. To do this we select the active load by
+ * setting the ATL bit.
+ *
+ * An asynchronous request instead ramps the voltage in the
+ * background, and when that process stabilizes, the target
+ * load is copied to the active load and the CCU frequency
+ * is switched. We do this by selecting the target load
+ * (ATL bit clear) and setting the request auto-copy (AC bit
+ * set).
+ *
+ * Note, we do NOT read-modify-write this register.
+ */
+ mask = (u32)1 << go_bit;
+ if (sync)
+ mask |= 1 << control->atl_bit;
+ else
+ mask |= 1 << control->ac_bit;
+ __ccu_write(ccu, offset, mask);
+
+ /* Wait for indication that operation is complete. */
+ ret = __ccu_wait_bit(ccu, offset, go_bit, false);
+ if (!ret)
+ pr_err("%s: ccu %s policy engine never started\n",
+ __func__, ccu->name);
+
+ return ret;
+}
+
+static bool __ccu_policy_engine_stop(struct ccu_data *ccu)
+{
+ struct bcm_lvm_en *enable = &ccu->policy.enable;
+ u32 offset;
+ u32 enable_bit;
+ bool ret;
+
+ /* If we don't need to control policy for this CCU, we're done. */
+ if (!policy_lvm_en_exists(enable))
+ return true;
+
+ /* Ensure we're not busy before we start */
+ offset = enable->offset;
+ enable_bit = enable->bit;
+ ret = __ccu_wait_bit(ccu, offset, enable_bit, false);
+ if (!ret) {
+ pr_err("%s: ccu %s policy engine already stopped\n",
+ __func__, ccu->name);
+ return false;
+ }
+
+ /* Now set the bit to stop the engine (NO read-modify-write) */
+ __ccu_write(ccu, offset, (u32)1 << enable_bit);
+
+ /* Wait for indication that it has stopped. */
+ ret = __ccu_wait_bit(ccu, offset, enable_bit, false);
+ if (!ret)
+ pr_err("%s: ccu %s policy engine never stopped\n",
+ __func__, ccu->name);
+
+ return ret;
+}
+
+/*
+ * A CCU has four operating conditions ("policies"), and some clocks
+ * can be disabled or enabled based on which policy is currently in
+ * effect. Such clocks have a bit in a "policy mask" register for
+ * each policy indicating whether the clock is enabled for that
+ * policy or not. The bit position for a clock is the same for all
+ * four registers, and the 32-bit registers are at consecutive
+ * addresses.
+ */
+static bool policy_init(struct ccu_data *ccu, struct bcm_clk_policy *policy)
+{
+ u32 offset;
+ u32 mask;
+ int i;
+ bool ret;
+
+ if (!policy_exists(policy))
+ return true;
+
+ /*
+ * We need to stop the CCU policy engine to allow update
+ * of our policy bits.
+ */
+ if (!__ccu_policy_engine_stop(ccu)) {
+ pr_err("%s: unable to stop CCU %s policy engine\n",
+ __func__, ccu->name);
+ return false;
+ }
+
+ /*
+ * For now, if a clock defines its policy bit we just mark
+ * it "enabled" for all four policies.
+ */
+ offset = policy->offset;
+ mask = (u32)1 << policy->bit;
+ for (i = 0; i < CCU_POLICY_COUNT; i++) {
+ u32 reg_val;
+
+ reg_val = __ccu_read(ccu, offset);
+ reg_val |= mask;
+ __ccu_write(ccu, offset, reg_val);
+ offset += sizeof(u32);
+ }
+
+ /* We're done updating; fire up the policy engine again. */
+ ret = __ccu_policy_engine_start(ccu, true);
+ if (!ret)
+ pr_err("%s: unable to restart CCU %s policy engine\n",
+ __func__, ccu->name);
+
+ return ret;
+}
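+
+/*
+ * For example, if a clock's policy mask bit lived at (hypothetical)
+ * offset 0x0010, the loop above would set that bit in the four
+ * consecutive mask registers at 0x0010, 0x0014, 0x0018 and 0x001c.
+ */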
+
+/* Gate operations */
+
+/* Determine whether a clock is gated. CCU lock must be held. */
+static bool
+__is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
+{
+ u32 bit_mask;
+ u32 reg_val;
+
+ /* If there is no gate we can assume it's enabled. */
+ if (!gate_exists(gate))
+ return true;
+
+ bit_mask = 1 << gate->status_bit;
+ reg_val = __ccu_read(ccu, gate->offset);
+
+ return (reg_val & bit_mask) != 0;
+}
+
+/* Determine whether a clock is gated. */
+static bool
+is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
+{
+	unsigned long flags;
+ bool ret;
+
+ /* Avoid taking the lock if we can */
+ if (!gate_exists(gate))
+ return true;
+
+ flags = ccu_lock(ccu);
+ ret = __is_clk_gate_enabled(ccu, gate);
+ ccu_unlock(ccu, flags);
+
+ return ret;
+}
+
+/*
+ * Commit our desired gate state to the hardware.
+ * Returns true if successful, false otherwise.
+ */
+static bool
+__gate_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate)
+{
+ u32 reg_val;
+ u32 mask;
+ bool enabled = false;
+
+ BUG_ON(!gate_exists(gate));
+ if (!gate_is_sw_controllable(gate))
+ return true; /* Nothing we can change */
+
+ reg_val = __ccu_read(ccu, gate->offset);
+
+ /* For a hardware/software gate, set which is in control */
+ if (gate_is_hw_controllable(gate)) {
+ mask = (u32)1 << gate->hw_sw_sel_bit;
+ if (gate_is_sw_managed(gate))
+ reg_val |= mask;
+ else
+ reg_val &= ~mask;
+ }
+
+ /*
+ * If software is in control, enable or disable the gate.
+ * If hardware is, clear the enabled bit for good measure.
+ * If a software controlled gate can't be disabled, we're
+ * required to write a 0 into the enable bit (but the gate
+ * will be enabled).
+ */
+ mask = (u32)1 << gate->en_bit;
+ if (gate_is_sw_managed(gate) && (enabled = gate_is_enabled(gate)) &&
+ !gate_is_no_disable(gate))
+ reg_val |= mask;
+ else
+ reg_val &= ~mask;
+
+ __ccu_write(ccu, gate->offset, reg_val);
+
+ /* For a hardware controlled gate, we're done */
+ if (!gate_is_sw_managed(gate))
+ return true;
+
+ /* Otherwise wait for the gate to be in desired state */
+ return __ccu_wait_bit(ccu, gate->offset, gate->status_bit, enabled);
+}
+
+/*
+ * Initialize a gate. Our desired state (hardware/software select,
+ * and if software, its enable state) is committed to hardware
+ * without the usual checks to see if it's already set up that way.
+ * Returns true if successful, false otherwise.
+ */
+static bool gate_init(struct ccu_data *ccu, struct bcm_clk_gate *gate)
+{
+ if (!gate_exists(gate))
+ return true;
+ return __gate_commit(ccu, gate);
+}
+
+/*
+ * Set a gate to enabled or disabled state. Does nothing if the
+ * gate is not currently under software control, or if it is already
+ * in the requested state. Returns true if successful, false
+ * otherwise. CCU lock must be held.
+ */
+static bool
+__clk_gate(struct ccu_data *ccu, struct bcm_clk_gate *gate, bool enable)
+{
+ bool ret;
+
+ if (!gate_exists(gate) || !gate_is_sw_managed(gate))
+ return true; /* Nothing to do */
+
+ if (!enable && gate_is_no_disable(gate)) {
+ pr_warn("%s: invalid gate disable request (ignoring)\n",
+ __func__);
+ return true;
+ }
+
+ if (enable == gate_is_enabled(gate))
+ return true; /* No change */
+
+ gate_flip_enabled(gate);
+ ret = __gate_commit(ccu, gate);
+ if (!ret)
+ gate_flip_enabled(gate); /* Revert the change */
+
+ return ret;
+}
+
+/* Enable or disable a gate. Returns 0 if successful, -EIO otherwise */
+static int clk_gate(struct ccu_data *ccu, const char *name,
+ struct bcm_clk_gate *gate, bool enable)
+{
+ unsigned long flags;
+ bool success;
+
+ /*
+ * Avoid taking the lock if we can. We quietly ignore
+ * requests to change state that don't make sense.
+ */
+ if (!gate_exists(gate) || !gate_is_sw_managed(gate))
+ return 0;
+ if (!enable && gate_is_no_disable(gate))
+ return 0;
+
+ flags = ccu_lock(ccu);
+ __ccu_write_enable(ccu);
+
+ success = __clk_gate(ccu, gate, enable);
+
+ __ccu_write_disable(ccu);
+ ccu_unlock(ccu, flags);
+
+ if (success)
+ return 0;
+
+ pr_err("%s: failed to %s gate for %s\n", __func__,
+ enable ? "enable" : "disable", name);
+
+ return -EIO;
+}
+
+/* Hysteresis operations */
+
+/*
+ * If a clock gate requires a turn-off delay it will have
+ * "hysteresis" register bits defined. The first, if set, enables
+ * the delay; and if enabled, the second bit determines whether the
+ * delay is "low" or "high" (1 means high). For now, if it's
+ * defined for a clock, we set it.
+ */
+static bool hyst_init(struct ccu_data *ccu, struct bcm_clk_hyst *hyst)
+{
+ u32 offset;
+ u32 reg_val;
+ u32 mask;
+
+ if (!hyst_exists(hyst))
+ return true;
+
+ offset = hyst->offset;
+ mask = (u32)1 << hyst->en_bit;
+ mask |= (u32)1 << hyst->val_bit;
+
+ reg_val = __ccu_read(ccu, offset);
+ reg_val |= mask;
+ __ccu_write(ccu, offset, reg_val);
+
+ return true;
+}
+
+/* Trigger operations */
+
+/*
+ * Caller must ensure CCU lock is held and access is enabled.
+ * Returns true if successful, false otherwise.
+ */
+static bool __clk_trigger(struct ccu_data *ccu, struct bcm_clk_trig *trig)
+{
+ /* Trigger the clock and wait for it to finish */
+ __ccu_write(ccu, trig->offset, 1 << trig->bit);
+
+ return __ccu_wait_bit(ccu, trig->offset, trig->bit, false);
+}
+
+/* Divider operations */
+
+/* Read a divider value and return the scaled divisor it represents. */
+static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div)
+{
+ unsigned long flags;
+ u32 reg_val;
+ u32 reg_div;
+
+ if (divider_is_fixed(div))
+ return (u64)div->u.fixed;
+
+ flags = ccu_lock(ccu);
+ reg_val = __ccu_read(ccu, div->u.s.offset);
+ ccu_unlock(ccu, flags);
+
+ /* Extract the full divider field from the register value */
+ reg_div = bitfield_extract(reg_val, div->u.s.shift, div->u.s.width);
+
+ /* Return the scaled divisor value it represents */
+ return scaled_div_value(div, reg_div);
+}
+
+/*
+ * Convert a divider's scaled divisor value into its recorded form
+ * and commit it into the hardware divider register.
+ *
+ * Returns 0 on success.  Returns -ENXIO if gating failed, and -EIO
+ * if a trigger failed.
+ */
+static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
+ struct bcm_clk_div *div, struct bcm_clk_trig *trig)
+{
+ bool enabled;
+ u32 reg_div;
+ u32 reg_val;
+ int ret = 0;
+
+ BUG_ON(divider_is_fixed(div));
+
+ /*
+ * If we're just initializing the divider, and no initial
+ * state was defined in the device tree, we just find out
+ * what its current value is rather than updating it.
+ */
+ if (div->u.s.scaled_div == BAD_SCALED_DIV_VALUE) {
+ reg_val = __ccu_read(ccu, div->u.s.offset);
+ reg_div = bitfield_extract(reg_val, div->u.s.shift,
+ div->u.s.width);
+ div->u.s.scaled_div = scaled_div_value(div, reg_div);
+
+ return 0;
+ }
+
+ /* Convert the scaled divisor to the value we need to record */
+ reg_div = divider(div, div->u.s.scaled_div);
+
+ /* Clock needs to be enabled before changing the rate */
+ enabled = __is_clk_gate_enabled(ccu, gate);
+ if (!enabled && !__clk_gate(ccu, gate, true)) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ /* Replace the divider value and record the result */
+ reg_val = __ccu_read(ccu, div->u.s.offset);
+ reg_val = bitfield_replace(reg_val, div->u.s.shift, div->u.s.width,
+ reg_div);
+ __ccu_write(ccu, div->u.s.offset, reg_val);
+
+ /* If the trigger fails we still want to disable the gate */
+ if (!__clk_trigger(ccu, trig))
+ ret = -EIO;
+
+ /* Disable the clock again if it was disabled to begin with */
+ if (!enabled && !__clk_gate(ccu, gate, false))
+ ret = ret ? ret : -ENXIO; /* return first error */
+out:
+ return ret;
+}
+
+/*
+ * Initialize a divider by committing our desired state to hardware
+ * without the usual checks to see if it's already set up that way.
+ * Returns true if successful, false otherwise.
+ */
+static bool div_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
+ struct bcm_clk_div *div, struct bcm_clk_trig *trig)
+{
+ if (!divider_exists(div) || divider_is_fixed(div))
+ return true;
+ return !__div_commit(ccu, gate, div, trig);
+}
+
+static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
+ struct bcm_clk_div *div, struct bcm_clk_trig *trig,
+ u64 scaled_div)
+{
+ unsigned long flags;
+ u64 previous;
+ int ret;
+
+ BUG_ON(divider_is_fixed(div));
+
+ previous = div->u.s.scaled_div;
+ if (previous == scaled_div)
+ return 0; /* No change */
+
+ div->u.s.scaled_div = scaled_div;
+
+ flags = ccu_lock(ccu);
+ __ccu_write_enable(ccu);
+
+ ret = __div_commit(ccu, gate, div, trig);
+
+ __ccu_write_disable(ccu);
+ ccu_unlock(ccu, flags);
+
+ if (ret)
+ div->u.s.scaled_div = previous; /* Revert the change */
+
+ return ret;
+}
+
+/* Common clock rate helpers */
+
+/*
+ * Implement the common clock framework recalc_rate method, taking
+ * into account a divider and an optional pre-divider. The
+ * pre-divider register pointer may be NULL.
+ */
+static unsigned long clk_recalc_rate(struct ccu_data *ccu,
+ struct bcm_clk_div *div, struct bcm_clk_div *pre_div,
+ unsigned long parent_rate)
+{
+ u64 scaled_parent_rate;
+ u64 scaled_div;
+ u64 result;
+
+ if (!divider_exists(div))
+ return parent_rate;
+
+ if (parent_rate > (unsigned long)LONG_MAX)
+ return 0; /* actually this would be a caller bug */
+
+ /*
+ * If there is a pre-divider, divide the scaled parent rate
+ * by the pre-divider value first. In this case--to improve
+ * accuracy--scale the parent rate by *both* the pre-divider
+ * value and the divider before actually computing the
+ * result of the pre-divider.
+ *
+ * If there's only one divider, just scale the parent rate.
+ */
+ if (pre_div && divider_exists(pre_div)) {
+ u64 scaled_rate;
+
+ scaled_rate = scale_rate(pre_div, parent_rate);
+ scaled_rate = scale_rate(div, scaled_rate);
+ scaled_div = divider_read_scaled(ccu, pre_div);
+ scaled_parent_rate = do_div_round_closest(scaled_rate,
+ scaled_div);
+ } else {
+ scaled_parent_rate = scale_rate(div, parent_rate);
+ }
+
+ /*
+ * Get the scaled divisor value, and divide the scaled
+ * parent rate by that to determine this clock's resulting
+ * rate.
+ */
+ scaled_div = divider_read_scaled(ccu, div);
+ result = do_div_round_closest(scaled_parent_rate, scaled_div);
+
+ return (unsigned long)result;
+}
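+
+/*
+ * A quick sanity check with hypothetical numbers: with no pre-divider,
+ * a 100 MHz parent and a divider (frac_width 3) whose field currently
+ * reads 12 (scaled divisor 20, i.e. 2.5), the scaled parent rate is
+ * 100000000 << 3 = 800000000 and the result is 800000000 / 20 =
+ * 40000000, i.e. 40 MHz.
+ */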
+
+/*
+ * Compute the output rate produced when a given parent rate is fed
+ * into two dividers. The pre-divider can be NULL, and even if it's
+ * non-null it may be nonexistent. It's also OK for the divider to
+ * be nonexistent, and in that case the pre-divider is also ignored.
+ *
+ * If scaled_div is non-null, it is used to return the scaled divisor
+ * value used by the (downstream) divider to produce that rate.
+ */
+static long round_rate(struct ccu_data *ccu, struct bcm_clk_div *div,
+ struct bcm_clk_div *pre_div,
+ unsigned long rate, unsigned long parent_rate,
+ u64 *scaled_div)
+{
+ u64 scaled_parent_rate;
+ u64 min_scaled_div;
+ u64 max_scaled_div;
+ u64 best_scaled_div;
+ u64 result;
+
+ BUG_ON(!divider_exists(div));
+ BUG_ON(!rate);
+ BUG_ON(parent_rate > (u64)LONG_MAX);
+
+ /*
+ * If there is a pre-divider, divide the scaled parent rate
+ * by the pre-divider value first. In this case--to improve
+ * accuracy--scale the parent rate by *both* the pre-divider
+ * value and the divider before actually computing the
+ * result of the pre-divider.
+ *
+ * If there's only one divider, just scale the parent rate.
+ *
+ * For simplicity we treat the pre-divider as fixed (for now).
+ */
+ if (divider_exists(pre_div)) {
+ u64 scaled_rate;
+ u64 scaled_pre_div;
+
+ scaled_rate = scale_rate(pre_div, parent_rate);
+ scaled_rate = scale_rate(div, scaled_rate);
+ scaled_pre_div = divider_read_scaled(ccu, pre_div);
+ scaled_parent_rate = do_div_round_closest(scaled_rate,
+ scaled_pre_div);
+ } else {
+ scaled_parent_rate = scale_rate(div, parent_rate);
+ }
+
+ /*
+ * Compute the best possible divider and ensure it is in
+ * range. A fixed divider can't be changed, so just report
+ * the best we can do.
+ */
+ if (!divider_is_fixed(div)) {
+ best_scaled_div = do_div_round_closest(scaled_parent_rate,
+ rate);
+ min_scaled_div = scaled_div_min(div);
+ max_scaled_div = scaled_div_max(div);
+ if (best_scaled_div > max_scaled_div)
+ best_scaled_div = max_scaled_div;
+ else if (best_scaled_div < min_scaled_div)
+ best_scaled_div = min_scaled_div;
+ } else {
+ best_scaled_div = divider_read_scaled(ccu, div);
+ }
+
+ /* OK, figure out the resulting rate */
+ result = do_div_round_closest(scaled_parent_rate, best_scaled_div);
+
+ if (scaled_div)
+ *scaled_div = best_scaled_div;
+
+ return (long)result;
+}
+
+/* Common clock parent helpers */
+
+/*
+ * For a given parent selector (register field) value, find the
+ * index into a selector's parent_sel array that contains it.
+ * Returns the index, or BAD_CLK_INDEX if it's not found.
+ */
+static u8 parent_index(struct bcm_clk_sel *sel, u8 parent_sel)
+{
+ u8 i;
+
+ BUG_ON(sel->parent_count > (u32)U8_MAX);
+ for (i = 0; i < sel->parent_count; i++)
+ if (sel->parent_sel[i] == parent_sel)
+ return i;
+ return BAD_CLK_INDEX;
+}
+
+/*
+ * Fetch the current value of the selector, and translate that into
+ * its corresponding index in the parent array we registered with
+ * the clock framework.
+ *
+ * Returns parent array index that corresponds with the value found,
+ * or BAD_CLK_INDEX if the found value is out of range.
+ */
+static u8 selector_read_index(struct ccu_data *ccu, struct bcm_clk_sel *sel)
+{
+ unsigned long flags;
+ u32 reg_val;
+ u32 parent_sel;
+ u8 index;
+
+ /* If there's no selector, there's only one parent */
+ if (!selector_exists(sel))
+ return 0;
+
+ /* Get the value in the selector register */
+ flags = ccu_lock(ccu);
+ reg_val = __ccu_read(ccu, sel->offset);
+ ccu_unlock(ccu, flags);
+
+ parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);
+
+ /* Look up that selector's parent array index and return it */
+ index = parent_index(sel, parent_sel);
+ if (index == BAD_CLK_INDEX)
+ pr_err("%s: out-of-range parent selector %u (%s 0x%04x)\n",
+ __func__, parent_sel, ccu->name, sel->offset);
+
+ return index;
+}
+
+/*
+ * Commit our desired selector value to the hardware.
+ *
+ * Returns 0 on success. Returns -EINVAL for invalid arguments.
+ * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
+ */
+static int
+__sel_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
+ struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
+{
+ u32 parent_sel;
+ u32 reg_val;
+ bool enabled;
+ int ret = 0;
+
+ BUG_ON(!selector_exists(sel));
+
+ /*
+ * If we're just initializing the selector, and no initial
+ * state was defined in the device tree, we just find out
+ * what its current value is rather than updating it.
+ */
+ if (sel->clk_index == BAD_CLK_INDEX) {
+ u8 index;
+
+ reg_val = __ccu_read(ccu, sel->offset);
+ parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);
+ index = parent_index(sel, parent_sel);
+ if (index == BAD_CLK_INDEX)
+ return -EINVAL;
+ sel->clk_index = index;
+
+ return 0;
+ }
+
+ BUG_ON((u32)sel->clk_index >= sel->parent_count);
+ parent_sel = sel->parent_sel[sel->clk_index];
+
+ /* Clock needs to be enabled before changing the parent */
+ enabled = __is_clk_gate_enabled(ccu, gate);
+ if (!enabled && !__clk_gate(ccu, gate, true))
+ return -ENXIO;
+
+ /* Replace the selector value and record the result */
+ reg_val = __ccu_read(ccu, sel->offset);
+ reg_val = bitfield_replace(reg_val, sel->shift, sel->width, parent_sel);
+ __ccu_write(ccu, sel->offset, reg_val);
+
+ /* If the trigger fails we still want to disable the gate */
+ if (!__clk_trigger(ccu, trig))
+ ret = -EIO;
+
+ /* Disable the clock again if it was disabled to begin with */
+ if (!enabled && !__clk_gate(ccu, gate, false))
+ ret = ret ? ret : -ENXIO; /* return first error */
+
+ return ret;
+}
+
+/*
+ * Initialize a selector by committing our desired state to hardware
+ * without the usual checks to see if it's already set up that way.
+ * Returns true if successful, false otherwise.
+ */
+static bool sel_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
+ struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
+{
+ if (!selector_exists(sel))
+ return true;
+ return !__sel_commit(ccu, gate, sel, trig);
+}
+
+/*
+ * Write a new value into a selector register to switch to a
+ * different parent clock. Returns 0 on success, or an error code
+ * (from __sel_commit()) otherwise.
+ */
+static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
+ struct bcm_clk_sel *sel, struct bcm_clk_trig *trig,
+ u8 index)
+{
+ unsigned long flags;
+ u8 previous;
+ int ret;
+
+ previous = sel->clk_index;
+ if (previous == index)
+ return 0; /* No change */
+
+ sel->clk_index = index;
+
+ flags = ccu_lock(ccu);
+ __ccu_write_enable(ccu);
+
+ ret = __sel_commit(ccu, gate, sel, trig);
+
+ __ccu_write_disable(ccu);
+ ccu_unlock(ccu, flags);
+
+ if (ret)
+ sel->clk_index = previous; /* Revert the change */
+
+ return ret;
+}
+
+/* Clock operations */
+
+static int kona_peri_clk_enable(struct clk_hw *hw)
+{
+ struct kona_clk *bcm_clk = to_kona_clk(hw);
+ struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;
+
+ return clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, true);
+}
+
+static void kona_peri_clk_disable(struct clk_hw *hw)
+{
+ struct kona_clk *bcm_clk = to_kona_clk(hw);
+ struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;
+
+ (void)clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, false);
+}
+
+static int kona_peri_clk_is_enabled(struct clk_hw *hw)
+{
+ struct kona_clk *bcm_clk = to_kona_clk(hw);
+ struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;
+
+ return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0;
+}
+
+static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct kona_clk *bcm_clk = to_kona_clk(hw);
+ struct peri_clk_data *data = bcm_clk->u.peri;
+
+ return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div,
+ parent_rate);
+}
+
+static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct kona_clk *bcm_clk = to_kona_clk(hw);
+ struct bcm_clk_div *div = &bcm_clk->u.peri->div;
+
+ if (!divider_exists(div))
+ return __clk_get_rate(hw->clk);
+
+ /* Quietly avoid a zero rate */
+ return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div,
+ rate ? rate : 1, *parent_rate, NULL);
+}
+
+static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *best_parent_rate, struct clk **best_parent)
+{
+ struct kona_clk *bcm_clk = to_kona_clk(hw);
+ struct clk *clk = hw->clk;
+ struct clk *current_parent;
+ unsigned long parent_rate;
+ unsigned long best_delta;
+ unsigned long best_rate;
+ u32 parent_count;
+ u32 which;
+
+ /*
+ * If there is no other parent to choose, use the current one.
+ * Note: We don't honor (or use) CLK_SET_RATE_NO_REPARENT.
+ */
+ WARN_ON_ONCE(bcm_clk->init_data.flags & CLK_SET_RATE_NO_REPARENT);
+ parent_count = (u32)bcm_clk->init_data.num_parents;
+ if (parent_count < 2)
+ return kona_peri_clk_round_rate(hw, rate, best_parent_rate);
+
+ /* Unless we can do better, stick with current parent */
+ current_parent = clk_get_parent(clk);
+ parent_rate = __clk_get_rate(current_parent);
+ best_rate = kona_peri_clk_round_rate(hw, rate, &parent_rate);
+ best_delta = abs(best_rate - rate);
+
+ /* Check whether any other parent clock can produce a better result */
+ for (which = 0; which < parent_count; which++) {
+ struct clk *parent = clk_get_parent_by_index(clk, which);
+ unsigned long delta;
+ unsigned long other_rate;
+
+ BUG_ON(!parent);
+ if (parent == current_parent)
+ continue;
+
+ /* We don't support CLK_SET_RATE_PARENT */
+ parent_rate = __clk_get_rate(parent);
+ other_rate = kona_peri_clk_round_rate(hw, rate, &parent_rate);
+ delta = abs(other_rate - rate);
+ if (delta < best_delta) {
+ best_delta = delta;
+ best_rate = other_rate;
+ *best_parent = parent;
+ *best_parent_rate = parent_rate;
+ }
+ }
+
+ return best_rate;
+}
+
+static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct kona_clk *bcm_clk = to_kona_clk(hw);
+ struct peri_clk_data *data = bcm_clk->u.peri;
+ struct bcm_clk_sel *sel = &data->sel;
+ struct bcm_clk_trig *trig;
+ int ret;
+
+ BUG_ON(index >= sel->parent_count);
+
+ /* If there's only one parent we don't require a selector */
+ if (!selector_exists(sel))
+ return 0;
+
+ /*
+ * The regular trigger is used by default, but if there's a
+ * pre-trigger we want to use that instead.
+ */
+ trig = trigger_exists(&data->pre_trig) ? &data->pre_trig
+ : &data->trig;
+
+ ret = selector_write(bcm_clk->ccu, &data->gate, sel, trig, index);
+ if (ret == -ENXIO) {
+ pr_err("%s: gating failure for %s\n", __func__,
+ bcm_clk->init_data.name);
+ ret = -EIO; /* Don't proliferate weird errors */
+ } else if (ret == -EIO) {
+ pr_err("%s: %strigger failed for %s\n", __func__,
+ trig == &data->pre_trig ? "pre-" : "",
+ bcm_clk->init_data.name);
+ }
+
+ return ret;
+}
+
+static u8 kona_peri_clk_get_parent(struct clk_hw *hw)
+{
+ struct kona_clk *bcm_clk = to_kona_clk(hw);
+ struct peri_clk_data *data = bcm_clk->u.peri;
+ u8 index;
+
+ index = selector_read_index(bcm_clk->ccu, &data->sel);
+
+ /* Not all callers would handle an out-of-range value gracefully */
+ return index == BAD_CLK_INDEX ? 0 : index;
+}
+
+static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct kona_clk *bcm_clk = to_kona_clk(hw);
+ struct peri_clk_data *data = bcm_clk->u.peri;
+ struct bcm_clk_div *div = &data->div;
+ u64 scaled_div = 0;
+ int ret;
+
+ if (parent_rate > (unsigned long)LONG_MAX)
+ return -EINVAL;
+
+ if (rate == __clk_get_rate(hw->clk))
+ return 0;
+
+ if (!divider_exists(div))
+ return rate == parent_rate ? 0 : -EINVAL;
+
+ /*
+ * A fixed divider can't be changed. (Nor can a fixed
+ * pre-divider be, but for now we never actually try to
+ * change that.) Tolerate a request for a no-op change.
+ */
+ if (divider_is_fixed(&data->div))
+ return rate == parent_rate ? 0 : -EINVAL;
+
+ /*
+ * Get the scaled divisor value needed to achieve a clock
+ * rate as close as possible to what was requested, given
+ * the parent clock rate supplied.
+ */
+ (void)round_rate(bcm_clk->ccu, div, &data->pre_div,
+ rate ? rate : 1, parent_rate, &scaled_div);
+
+ /*
+ * We aren't updating any pre-divider at this point, so
+ * we'll use the regular trigger.
+ */
+ ret = divider_write(bcm_clk->ccu, &data->gate, &data->div,
+ &data->trig, scaled_div);
+ if (ret == -ENXIO) {
+ pr_err("%s: gating failure for %s\n", __func__,
+ bcm_clk->init_data.name);
+ ret = -EIO; /* Don't proliferate weird errors */
+ } else if (ret == -EIO) {
+ pr_err("%s: trigger failed for %s\n", __func__,
+ bcm_clk->init_data.name);
+ }
+
+ return ret;
+}
+
+struct clk_ops kona_peri_clk_ops = {
+ .enable = kona_peri_clk_enable,
+ .disable = kona_peri_clk_disable,
+ .is_enabled = kona_peri_clk_is_enabled,
+ .recalc_rate = kona_peri_clk_recalc_rate,
+ .determine_rate = kona_peri_clk_determine_rate,
+ .set_parent = kona_peri_clk_set_parent,
+ .get_parent = kona_peri_clk_get_parent,
+ .set_rate = kona_peri_clk_set_rate,
+};
+
+/* Put a peripheral clock into its initial state */
+static bool __peri_clk_init(struct kona_clk *bcm_clk)
+{
+ struct ccu_data *ccu = bcm_clk->ccu;
+ struct peri_clk_data *peri = bcm_clk->u.peri;
+ const char *name = bcm_clk->init_data.name;
+ struct bcm_clk_trig *trig;
+
+ BUG_ON(bcm_clk->type != bcm_clk_peri);
+
+ if (!policy_init(ccu, &peri->policy)) {
+ pr_err("%s: error initializing policy for %s\n",
+ __func__, name);
+ return false;
+ }
+ if (!gate_init(ccu, &peri->gate)) {
+ pr_err("%s: error initializing gate for %s\n", __func__, name);
+ return false;
+ }
+ if (!hyst_init(ccu, &peri->hyst)) {
+ pr_err("%s: error initializing hyst for %s\n", __func__, name);
+ return false;
+ }
+ if (!div_init(ccu, &peri->gate, &peri->div, &peri->trig)) {
+ pr_err("%s: error initializing divider for %s\n", __func__,
+ name);
+ return false;
+ }
+
+ /*
+ * For the pre-divider and selector, the pre-trigger is used
+ * if it's present, otherwise we just use the regular trigger.
+ */
+ trig = trigger_exists(&peri->pre_trig) ? &peri->pre_trig
+ : &peri->trig;
+
+ if (!div_init(ccu, &peri->gate, &peri->pre_div, trig)) {
+ pr_err("%s: error initializing pre-divider for %s\n", __func__,
+ name);
+ return false;
+ }
+
+ if (!sel_init(ccu, &peri->gate, &peri->sel, trig)) {
+ pr_err("%s: error initializing selector for %s\n", __func__,
+ name);
+ return false;
+ }
+
+ return true;
+}
+
+static bool __kona_clk_init(struct kona_clk *bcm_clk)
+{
+ switch (bcm_clk->type) {
+ case bcm_clk_peri:
+ return __peri_clk_init(bcm_clk);
+ default:
+ BUG();
+ }
+	return false;
+}
+
+/* Set a CCU and all its clocks into their desired initial state */
+bool __init kona_ccu_init(struct ccu_data *ccu)
+{
+ unsigned long flags;
+ unsigned int which;
+ struct clk **clks = ccu->clk_data.clks;
+ bool success = true;
+
+ flags = ccu_lock(ccu);
+ __ccu_write_enable(ccu);
+
+ for (which = 0; which < ccu->clk_data.clk_num; which++) {
+ struct kona_clk *bcm_clk;
+
+ if (!clks[which])
+ continue;
+ bcm_clk = to_kona_clk(__clk_get_hw(clks[which]));
+ success &= __kona_clk_init(bcm_clk);
+ }
+
+ __ccu_write_disable(ccu);
+ ccu_unlock(ccu, flags);
+ return success;
+}
diff --git a/drivers/clk/bcm/clk-kona.h b/drivers/clk/bcm/clk-kona.h
new file mode 100644
index 00000000000..2537b307291
--- /dev/null
+++ b/drivers/clk/bcm/clk-kona.h
@@ -0,0 +1,516 @@
+/*
+ * Copyright (C) 2013 Broadcom Corporation
+ * Copyright 2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CLK_KONA_H
+#define _CLK_KONA_H
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/clk-provider.h>
+
+#define BILLION 1000000000
+
+/* The common clock framework uses u8 to represent a parent index */
+#define PARENT_COUNT_MAX ((u32)U8_MAX)
+
+#define BAD_CLK_INDEX U8_MAX /* Can't ever be valid */
+#define BAD_CLK_NAME ((const char *)-1)
+
+#define BAD_SCALED_DIV_VALUE U64_MAX
+
+/*
+ * Utility macros for object flag management. If possible, flags
+ * should be defined such that 0 is the desired default value.
+ */
+#define FLAG(type, flag) BCM_CLK_ ## type ## _FLAGS_ ## flag
+#define FLAG_SET(obj, type, flag) ((obj)->flags |= FLAG(type, flag))
+#define FLAG_CLEAR(obj, type, flag) ((obj)->flags &= ~(FLAG(type, flag)))
+#define FLAG_FLIP(obj, type, flag) ((obj)->flags ^= FLAG(type, flag))
+#define FLAG_TEST(obj, type, flag) (!!((obj)->flags & FLAG(type, flag)))
+
+/* CCU field state tests */
+
+#define ccu_policy_exists(ccu_policy) ((ccu_policy)->enable.offset != 0)
+
+/* Clock field state tests */
+
+#define policy_exists(policy) ((policy)->offset != 0)
+
+#define gate_exists(gate) FLAG_TEST(gate, GATE, EXISTS)
+#define gate_is_enabled(gate) FLAG_TEST(gate, GATE, ENABLED)
+#define gate_is_hw_controllable(gate) FLAG_TEST(gate, GATE, HW)
+#define gate_is_sw_controllable(gate) FLAG_TEST(gate, GATE, SW)
+#define gate_is_sw_managed(gate) FLAG_TEST(gate, GATE, SW_MANAGED)
+#define gate_is_no_disable(gate) FLAG_TEST(gate, GATE, NO_DISABLE)
+
+#define gate_flip_enabled(gate) FLAG_FLIP(gate, GATE, ENABLED)
+
+#define hyst_exists(hyst) ((hyst)->offset != 0)
+
+#define divider_exists(div) FLAG_TEST(div, DIV, EXISTS)
+#define divider_is_fixed(div) FLAG_TEST(div, DIV, FIXED)
+#define divider_has_fraction(div) (!divider_is_fixed(div) && \
+ (div)->u.s.frac_width > 0)
+
+#define selector_exists(sel) ((sel)->width != 0)
+#define trigger_exists(trig) FLAG_TEST(trig, TRIG, EXISTS)
+
+#define policy_lvm_en_exists(enable) ((enable)->offset != 0)
+#define policy_ctl_exists(control) ((control)->offset != 0)
+
+/* Clock type, used to tell common block what it's part of */
+enum bcm_clk_type {
+ bcm_clk_none, /* undefined clock type */
+ bcm_clk_bus,
+ bcm_clk_core,
+ bcm_clk_peri
+};
+
+/*
+ * CCU policy control for clocks. Clocks can be enabled or disabled
+ * based on the CCU policy in effect. One bit in each policy mask
+ * register (one per CCU policy) represents whether the clock is
+ * enabled when that policy is in effect or not.  The CCU policy engine
+ * must be stopped to update these bits, and must be restarted again
+ * afterward.
+ */
+struct bcm_clk_policy {
+ u32 offset; /* first policy mask register offset */
+ u32 bit; /* bit used in all mask registers */
+};
+
+/* Policy initialization macro */
+
+#define POLICY(_offset, _bit) \
+ { \
+ .offset = (_offset), \
+ .bit = (_bit), \
+ }
+
+/*
+ * Gating control and status is managed by a 32-bit gate register.
+ *
+ * There are several types of gating available:
+ * - (no gate)
+ * A clock with no gate is assumed to be always enabled.
+ * - hardware-only gating (auto-gating)
+ * Enabling or disabling clocks with this type of gate is
+ * managed automatically by the hardware. Such clocks can be
+ * considered by the software to be enabled. The current status
+ * of auto-gated clocks can be read from the gate status bit.
+ * - software-only gating
+ * Auto-gating is not available for this type of clock.
+ * Instead, software manages whether it's enabled by setting or
+ * clearing the enable bit. The current gate status of a gate
+ * under software control can be read from the gate status bit.
+ * To ensure a change to the gating status is complete, the
+ * status bit can be polled to verify that the gate has entered
+ * the desired state.
+ * - selectable hardware or software gating
+ * Gating for this type of clock can be configured to be either
+ * under software or hardware control. Which type is in use is
+ * determined by the hw_sw_sel bit of the gate register.
+ */
+struct bcm_clk_gate {
+ u32 offset; /* gate register offset */
+	u32	status_bit;	/* 0: gate is disabled; 1: gate is enabled */
+ u32 en_bit; /* 0: disable; 1: enable */
+ u32 hw_sw_sel_bit; /* 0: hardware gating; 1: software gating */
+ u32 flags; /* BCM_CLK_GATE_FLAGS_* below */
+};
+
+/*
+ * Gate flags:
+ * HW means this gate can be auto-gated
+ * SW means the state of this gate can be software controlled
+ * NO_DISABLE means this gate is (only) enabled if under software control
+ * SW_MANAGED means the status of this gate is under software control
+ * ENABLED means this software-managed gate is *supposed* to be enabled
+ */
+#define BCM_CLK_GATE_FLAGS_EXISTS ((u32)1 << 0) /* Gate is valid */
+#define BCM_CLK_GATE_FLAGS_HW ((u32)1 << 1) /* Can auto-gate */
+#define BCM_CLK_GATE_FLAGS_SW ((u32)1 << 2) /* Software control */
+#define BCM_CLK_GATE_FLAGS_NO_DISABLE ((u32)1 << 3) /* HW or enabled */
+#define BCM_CLK_GATE_FLAGS_SW_MANAGED ((u32)1 << 4) /* SW now in control */
+#define BCM_CLK_GATE_FLAGS_ENABLED ((u32)1 << 5) /* If SW_MANAGED */
+
+/*
+ * Gate initialization macros.
+ *
+ * Any gate initially under software control will be enabled.
+ */
+
+/* A hardware/software gate initially under software control */
+#define HW_SW_GATE(_offset, _status_bit, _en_bit, _hw_sw_sel_bit) \
+ { \
+ .offset = (_offset), \
+ .status_bit = (_status_bit), \
+ .en_bit = (_en_bit), \
+ .hw_sw_sel_bit = (_hw_sw_sel_bit), \
+ .flags = FLAG(GATE, HW)|FLAG(GATE, SW)| \
+ FLAG(GATE, SW_MANAGED)|FLAG(GATE, ENABLED)| \
+ FLAG(GATE, EXISTS), \
+ }
+
+/* A hardware/software gate initially under hardware control */
+#define HW_SW_GATE_AUTO(_offset, _status_bit, _en_bit, _hw_sw_sel_bit) \
+ { \
+ .offset = (_offset), \
+ .status_bit = (_status_bit), \
+ .en_bit = (_en_bit), \
+ .hw_sw_sel_bit = (_hw_sw_sel_bit), \
+ .flags = FLAG(GATE, HW)|FLAG(GATE, SW)| \
+ FLAG(GATE, EXISTS), \
+ }
+
+/* A hardware-or-enabled gate (enabled if not under hardware control) */
+#define HW_ENABLE_GATE(_offset, _status_bit, _en_bit, _hw_sw_sel_bit) \
+ { \
+ .offset = (_offset), \
+ .status_bit = (_status_bit), \
+ .en_bit = (_en_bit), \
+ .hw_sw_sel_bit = (_hw_sw_sel_bit), \
+ .flags = FLAG(GATE, HW)|FLAG(GATE, SW)| \
+ FLAG(GATE, NO_DISABLE)|FLAG(GATE, EXISTS), \
+ }
+
+/* A software-only gate */
+#define SW_ONLY_GATE(_offset, _status_bit, _en_bit) \
+ { \
+ .offset = (_offset), \
+ .status_bit = (_status_bit), \
+ .en_bit = (_en_bit), \
+ .flags = FLAG(GATE, SW)|FLAG(GATE, SW_MANAGED)| \
+ FLAG(GATE, ENABLED)|FLAG(GATE, EXISTS), \
+ }
+
+/* A hardware-only gate */
+#define HW_ONLY_GATE(_offset, _status_bit) \
+ { \
+ .offset = (_offset), \
+ .status_bit = (_status_bit), \
+ .flags = FLAG(GATE, HW)|FLAG(GATE, EXISTS), \
+ }
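+
+/*
+ * Example usage (the register offset and bit numbers here are made up,
+ * not taken from a real CCU):
+ *
+ *	.gate = HW_SW_GATE(0x0358, 18, 2, 3),
+ *
+ * declares a gate register at offset 0x0358 whose status, enable and
+ * hw/sw select bits are 18, 2 and 3 respectively, initially placed
+ * under software control and enabled.
+ */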
+
+/* Gate hysteresis for clocks */
+struct bcm_clk_hyst {
+ u32 offset; /* hyst register offset (normally CLKGATE) */
+ u32 en_bit; /* bit used to enable hysteresis */
+ u32 val_bit; /* if enabled: 0 = low delay; 1 = high delay */
+};
+
+/* Hysteresis initialization macro */
+
+#define HYST(_offset, _en_bit, _val_bit) \
+ { \
+ .offset = (_offset), \
+ .en_bit = (_en_bit), \
+ .val_bit = (_val_bit), \
+ }
+
+/*
+ * Each clock can have zero, one, or two dividers which change the
+ * output rate of the clock. Each divider can be either fixed or
+ * variable. If there are two dividers, they are the "pre-divider"
+ * and the "regular" or "downstream" divider. If there is only one,
+ * there is no pre-divider.
+ *
+ * A fixed divider is any non-zero (positive) value, and it
+ * indicates how the input rate is affected by the divider.
+ *
+ * The value of a variable divider is maintained in a sub-field of a
+ * 32-bit divider register. The position of the field in the
+ * register is defined by its offset and width. The value recorded
+ * in this field is always 1 less than the value it represents.
+ *
+ * In addition, a variable divider can indicate that some subset
+ * of its bits represent a "fractional" part of the divider. Such
+ * bits comprise the low-order portion of the divider field, and can
+ * be viewed as representing the portion of the divider that lies to
+ * the right of the decimal point. Most variable dividers have zero
+ * fractional bits. Variable dividers with non-zero fraction width
+ * still record a value 1 less than the value they represent; the
+ * added 1 does *not* affect the low-order bit in this case, it
+ * affects the bits above the fractional part only. (Often in this
+ * code a divider field value is distinguished from the value it
+ * represents by referring to the latter as a "divisor".)
+ *
+ * In order to avoid dealing with fractions, divider arithmetic is
+ * performed using "scaled" values. A scaled value is one that's
+ * been left-shifted by the fractional width of a divider. Dividing
+ * a scaled value by a scaled divisor produces the desired quotient
+ * without loss of precision and without any other special handling
+ * for fractions.
+ *
+ * The recorded value of a variable divider can be modified. To
+ * modify either divider (or both), a clock must be enabled (i.e.,
+ * using its gate). In addition, a trigger register (described
+ * below) must be used to commit the change, and polled to verify
+ * the change is complete.
+ */
+struct bcm_clk_div {
+ union {
+ struct { /* variable divider */
+ u32 offset; /* divider register offset */
+ u32 shift; /* field shift */
+ u32 width; /* field width */
+ u32 frac_width; /* field fraction width */
+
+ u64 scaled_div; /* scaled divider value */
+ } s;
+ u32 fixed; /* non-zero fixed divider value */
+ } u;
+ u32 flags; /* BCM_CLK_DIV_FLAGS_* below */
+};
+
+/*
+ * Divider flags:
+ * EXISTS means this divider exists
+ * FIXED means it is a fixed-rate divider
+ */
+#define BCM_CLK_DIV_FLAGS_EXISTS ((u32)1 << 0) /* Divider is valid */
+#define BCM_CLK_DIV_FLAGS_FIXED ((u32)1 << 1) /* Fixed-value */
+
+/* Divider initialization macros */
+
+/* A fixed (non-zero) divider */
+#define FIXED_DIVIDER(_value) \
+ { \
+ .u.fixed = (_value), \
+ .flags = FLAG(DIV, EXISTS)|FLAG(DIV, FIXED), \
+ }
+
+/* A divider with an integral divisor */
+#define DIVIDER(_offset, _shift, _width) \
+ { \
+ .u.s.offset = (_offset), \
+ .u.s.shift = (_shift), \
+ .u.s.width = (_width), \
+ .u.s.scaled_div = BAD_SCALED_DIV_VALUE, \
+ .flags = FLAG(DIV, EXISTS), \
+ }
+
+/* A divider whose divisor has an integer and fractional part */
+#define FRAC_DIVIDER(_offset, _shift, _width, _frac_width) \
+ { \
+ .u.s.offset = (_offset), \
+ .u.s.shift = (_shift), \
+ .u.s.width = (_width), \
+ .u.s.frac_width = (_frac_width), \
+ .u.s.scaled_div = BAD_SCALED_DIV_VALUE, \
+ .flags = FLAG(DIV, EXISTS), \
+ }
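+
+/*
+ * Example usage (offsets and field positions are made up):
+ *
+ *	.div = DIVIDER(0x0a10, 4, 14),
+ *	.pre_div = FRAC_DIVIDER(0x0a1c, 0, 12, 8),
+ *
+ * The first declares an integral 14-bit divider field at bit 4 of the
+ * register at offset 0x0a10; the second declares a 12-bit field at
+ * bit 0 of register 0x0a1c whose low 8 bits are fractional.
+ */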
+
+/*
+ * Clocks may have multiple "parent" clocks. If there is more than
+ * one, a selector must be specified to define which of the parent
+ * clocks is currently in use. The selected clock is indicated in a
+ * sub-field of a 32-bit selector register. The range of
+ * representable selector values typically exceeds the number of
+ * available parent clocks. Occasionally the reset value of a
+ * selector field is explicitly set to a (specific) value that does
+ * not correspond to a defined input clock.
+ *
+ * We register all known parent clocks with the common clock code
+ * using a packed array (i.e., no empty slots) of (parent) clock
+ * names, and refer to them later using indexes into that array.
+ * We maintain an array of selector values indexed by common clock
+ * index values in order to map between these common clock indexes
+ * and the selector values used by the hardware.
+ *
+ * Like dividers, a selector can be modified, but to do so a clock
+ * must be enabled, and a trigger must be used to commit the change.
+ */
+struct bcm_clk_sel {
+ u32 offset; /* selector register offset */
+ u32 shift; /* field shift */
+ u32 width; /* field width */
+
+ u32 parent_count; /* number of entries in parent_sel[] */
+ u32 *parent_sel; /* array of parent selector values */
+ u8 clk_index; /* current selected index in parent_sel[] */
+};
+
+/* Selector initialization macro */
+#define SELECTOR(_offset, _shift, _width) \
+ { \
+ .offset = (_offset), \
+ .shift = (_shift), \
+ .width = (_width), \
+ .clk_index = BAD_CLK_INDEX, \
+ }
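+
+/*
+ * For illustration (hypothetical values): a selector declared as
+ * SELECTOR(0x0a14, 0, 3) whose parent_sel array holds { 0, 2, 3 }
+ * maps the hardware field value 2 to index 1 of the packed parent
+ * array, and vice versa (see parent_index() in clk-kona.c).
+ */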
+
+/*
+ * Making changes to a variable divider or a selector for a clock
+ * requires the use of a trigger. A trigger is defined by a single
+ * bit within a register. To signal a change, a 1 is written into
+ * that bit. To determine when the change has been completed, that
+ * trigger bit is polled; the read value will be 1 while the change
+ * is in progress, and 0 when it is complete.
+ *
+ * Occasionally a clock will have more than one trigger. In this
+ * case, the "pre-trigger" will be used when changing a clock's
+ * selector and/or its pre-divider.
+ */
+struct bcm_clk_trig {
+ u32 offset; /* trigger register offset */
+ u32 bit; /* trigger bit */
+ u32 flags; /* BCM_CLK_TRIG_FLAGS_* below */
+};
+
+/*
+ * Trigger flags:
+ * EXISTS means this trigger exists
+ */
+#define BCM_CLK_TRIG_FLAGS_EXISTS ((u32)1 << 0) /* Trigger is valid */
+
+/* Trigger initialization macro */
+#define TRIGGER(_offset, _bit) \
+ { \
+ .offset = (_offset), \
+ .bit = (_bit), \
+ .flags = FLAG(TRIG, EXISTS), \
+ }
+
+struct peri_clk_data {
+ struct bcm_clk_policy policy;
+ struct bcm_clk_gate gate;
+ struct bcm_clk_hyst hyst;
+ struct bcm_clk_trig pre_trig;
+ struct bcm_clk_div pre_div;
+ struct bcm_clk_trig trig;
+ struct bcm_clk_div div;
+ struct bcm_clk_sel sel;
+ const char *clocks[]; /* must be last; use CLOCKS() to declare */
+};
+#define CLOCKS(...) { __VA_ARGS__, NULL, }
+#define NO_CLOCKS	{ NULL, }	/* Must be used if there are no parent clocks */
+
+struct kona_clk {
+ struct clk_hw hw;
+ struct clk_init_data init_data; /* includes name of this clock */
+ struct ccu_data *ccu; /* ccu this clock is associated with */
+ enum bcm_clk_type type;
+ union {
+ void *data;
+ struct peri_clk_data *peri;
+ } u;
+};
+#define to_kona_clk(_hw) \
+ container_of(_hw, struct kona_clk, hw)
+
+/* Initialization macro for an entry in a CCU's kona_clks[] array. */
+#define KONA_CLK(_ccu_name, _clk_name, _type) \
+ { \
+ .init_data = { \
+ .name = #_clk_name, \
+ .ops = &kona_ ## _type ## _clk_ops, \
+ }, \
+ .ccu = &_ccu_name ## _ccu_data, \
+ .type = bcm_clk_ ## _type, \
+ .u.data = &_clk_name ## _data, \
+ }
+#define LAST_KONA_CLK { .type = bcm_clk_none }
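+
+/*
+ * For example (using made-up CCU and clock names), an entry such as
+ *
+ *	KONA_CLK(root, sdio1, peri)
+ *
+ * expands to a kona_clk named "sdio1" that uses kona_peri_clk_ops,
+ * points at root_ccu_data, and takes its peripheral clock data from
+ * sdio1_data.
+ */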
+
+/*
+ * CCU policy control. To enable software update of the policy
+ * tables the CCU policy engine must be stopped by setting the
+ * software update enable bit (LVM_EN). After an update the engine
+ * is restarted using the GO bit and either the GO_ATL or GO_AC bit.
+ */
+struct bcm_lvm_en {
+ u32 offset; /* LVM_EN register offset */
+ u32 bit; /* POLICY_CONFIG_EN bit in register */
+};
+
+/* Policy enable initialization macro */
+#define CCU_LVM_EN(_offset, _bit) \
+ { \
+ .offset = (_offset), \
+ .bit = (_bit), \
+ }
+
+struct bcm_policy_ctl {
+ u32 offset; /* POLICY_CTL register offset */
+ u32 go_bit;
+ u32 atl_bit; /* GO, GO_ATL, and GO_AC bits */
+ u32 ac_bit;
+};
+
+/* Policy control initialization macro */
+#define CCU_POLICY_CTL(_offset, _go_bit, _ac_bit, _atl_bit) \
+ { \
+ .offset = (_offset), \
+ .go_bit = (_go_bit), \
+ .ac_bit = (_ac_bit), \
+ .atl_bit = (_atl_bit), \
+ }
+
+struct ccu_policy {
+ struct bcm_lvm_en enable;
+ struct bcm_policy_ctl control;
+};
+
+/*
+ * Each CCU defines a mapped area of memory containing registers
+ * used to manage clocks implemented by the CCU. Access to memory
+ * within the CCU's space is serialized by a spinlock. Before any
+ * (other) address can be written, a special access "password" value
+ * must be written to its WR_ACCESS register (located at the base
+ * address of the range). We keep track of the name of each CCU as
+ * it is set up, and maintain them in a list.
+ */
+struct ccu_data {
+ void __iomem *base; /* base of mapped address space */
+ spinlock_t lock; /* serialization lock */
+ bool write_enabled; /* write access is currently enabled */
+ struct ccu_policy policy;
+ struct list_head links; /* for ccu_list */
+ struct device_node *node;
+ struct clk_onecell_data clk_data;
+ const char *name;
+ u32 range; /* byte range of address space */
+ struct kona_clk kona_clks[]; /* must be last */
+};
+
+/* Initialization for common fields in a Kona ccu_data structure */
+#define KONA_CCU_COMMON(_prefix, _name, _ccuname) \
+ .name = #_name "_ccu", \
+ .lock = __SPIN_LOCK_UNLOCKED(_name ## _ccu_data.lock), \
+ .links = LIST_HEAD_INIT(_name ## _ccu_data.links), \
+ .clk_data = { \
+ .clk_num = _prefix ## _ ## _ccuname ## _CCU_CLOCK_COUNT, \
+ }
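+
+/*
+ * For example (with a made-up prefix and CCU name),
+ * KONA_CCU_COMMON(BCM281XX, root, ROOT) sets .name to "root_ccu",
+ * initializes the spinlock and list head for root_ccu_data, and sets
+ * .clk_data.clk_num to BCM281XX_ROOT_CCU_CLOCK_COUNT.
+ */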
+
+/* Exported globals */
+
+extern struct clk_ops kona_peri_clk_ops;
+
+/* Externally visible functions */
+
+extern u64 do_div_round_closest(u64 dividend, unsigned long divisor);
+extern u64 scaled_div_max(struct bcm_clk_div *div);
+extern u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value,
+ u32 billionths);
+
+extern struct clk *kona_clk_setup(struct kona_clk *bcm_clk);
+extern void __init kona_dt_ccu_setup(struct ccu_data *ccu,
+ struct device_node *node);
+extern bool __init kona_ccu_init(struct ccu_data *ccu);
+
+#endif /* _CLK_KONA_H */
diff --git a/drivers/clk/berlin/Makefile b/drivers/clk/berlin/Makefile
new file mode 100644
index 00000000000..2a36ab710a0
--- /dev/null
+++ b/drivers/clk/berlin/Makefile
@@ -0,0 +1,4 @@
+obj-y += berlin2-avpll.o berlin2-pll.o berlin2-div.o
+obj-$(CONFIG_MACH_BERLIN_BG2) += bg2.o
+obj-$(CONFIG_MACH_BERLIN_BG2CD) += bg2.o
+obj-$(CONFIG_MACH_BERLIN_BG2Q) += bg2q.o
diff --git a/drivers/clk/berlin/berlin2-avpll.c b/drivers/clk/berlin/berlin2-avpll.c
new file mode 100644
index 00000000000..fd0f26c3846
--- /dev/null
+++ b/drivers/clk/berlin/berlin2-avpll.c
@@ -0,0 +1,393 @@
+/*
+ * Copyright (c) 2014 Marvell Technology Group Ltd.
+ *
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ * Alexandre Belloni <alexandre.belloni@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include "berlin2-avpll.h"
+
+/*
+ * Berlin2 SoCs comprise up to two PLLs called AVPLL built upon a
+ * VCO with 8 channels each; channel 8 is the odd-one-out and does
+ * not provide mul/div.
+ *
+ * Unfortunately, its registers are not named but just numbered. To
+ * bring in at least some kind of structure, we split each AVPLL into
+ * the VCOs and each channel into separate clock drivers.
+ *
+ * Also, here and there the VCO registers are a bit different with
+ * respect to bit shifts. Make sure to add a comment for those.
+ */
+#define NUM_CHANNELS 8
+
+#define AVPLL_CTRL(x) ((x) * 0x4)
+
+#define VCO_CTRL0 AVPLL_CTRL(0)
+/* BG2/BG2CDs VCO_B has an additional shift of 4 for its VCO_CTRL0 reg */
+#define VCO_RESET BIT(0)
+#define VCO_POWERUP BIT(1)
+#define VCO_INTERPOL_SHIFT 2
+#define VCO_INTERPOL_MASK (0xf << VCO_INTERPOL_SHIFT)
+#define VCO_REG1V45_SEL_SHIFT 6
+#define VCO_REG1V45_SEL(x) ((x) << VCO_REG1V45_SEL_SHIFT)
+#define VCO_REG1V45_SEL_1V40 VCO_REG1V45_SEL(0)
+#define VCO_REG1V45_SEL_1V45 VCO_REG1V45_SEL(1)
+#define VCO_REG1V45_SEL_1V50 VCO_REG1V45_SEL(2)
+#define VCO_REG1V45_SEL_1V55 VCO_REG1V45_SEL(3)
+#define VCO_REG1V45_SEL_MASK VCO_REG1V45_SEL(3)
+#define VCO_REG0V9_SEL_SHIFT 8
+#define VCO_REG0V9_SEL_MASK (0xf << VCO_REG0V9_SEL_SHIFT)
+#define VCO_VTHCAL_SHIFT 12
+#define VCO_VTHCAL(x) ((x) << VCO_VTHCAL_SHIFT)
+#define VCO_VTHCAL_0V90 VCO_VTHCAL(0)
+#define VCO_VTHCAL_0V95 VCO_VTHCAL(1)
+#define VCO_VTHCAL_1V00 VCO_VTHCAL(2)
+#define VCO_VTHCAL_1V05 VCO_VTHCAL(3)
+#define VCO_VTHCAL_MASK VCO_VTHCAL(3)
+#define VCO_KVCOEXT_SHIFT 14
+#define VCO_KVCOEXT_MASK (0x3 << VCO_KVCOEXT_SHIFT)
+#define VCO_KVCOEXT_ENABLE BIT(17)
+#define VCO_V2IEXT_SHIFT 18
+#define VCO_V2IEXT_MASK (0xf << VCO_V2IEXT_SHIFT)
+#define VCO_V2IEXT_ENABLE BIT(22)
+#define VCO_SPEED_SHIFT 23
+#define VCO_SPEED(x) ((x) << VCO_SPEED_SHIFT)
+#define VCO_SPEED_1G08_1G21 VCO_SPEED(0)
+#define VCO_SPEED_1G21_1G40 VCO_SPEED(1)
+#define VCO_SPEED_1G40_1G61 VCO_SPEED(2)
+#define VCO_SPEED_1G61_1G86 VCO_SPEED(3)
+#define VCO_SPEED_1G86_2G00 VCO_SPEED(4)
+#define VCO_SPEED_2G00_2G22 VCO_SPEED(5)
+#define VCO_SPEED_2G22 VCO_SPEED(6)
+#define VCO_SPEED_MASK VCO_SPEED(0x7)
+#define VCO_CLKDET_ENABLE BIT(26)
+#define VCO_CTRL1 AVPLL_CTRL(1)
+#define VCO_REFDIV_SHIFT 0
+#define VCO_REFDIV(x) ((x) << VCO_REFDIV_SHIFT)
+#define VCO_REFDIV_1 VCO_REFDIV(0)
+#define VCO_REFDIV_2 VCO_REFDIV(1)
+#define VCO_REFDIV_4 VCO_REFDIV(2)
+#define VCO_REFDIV_3 VCO_REFDIV(3)
+#define VCO_REFDIV_MASK VCO_REFDIV(0x3f)
+#define VCO_FBDIV_SHIFT 6
+#define VCO_FBDIV(x) ((x) << VCO_FBDIV_SHIFT)
+#define VCO_FBDIV_MASK VCO_FBDIV(0xff)
+#define VCO_ICP_SHIFT 14
+/* PLL Charge Pump Current = 10uA * (x + 1) */
+#define VCO_ICP(x) ((x) << VCO_ICP_SHIFT)
+#define VCO_ICP_MASK VCO_ICP(0xf)
+#define VCO_LOAD_CAP BIT(18)
+#define VCO_CALIBRATION_START BIT(19)
+#define VCO_FREQOFFSETn(x) AVPLL_CTRL(3 + (x))
+#define VCO_FREQOFFSET_MASK 0x7ffff
+#define VCO_CTRL10 AVPLL_CTRL(10)
+#define VCO_POWERUP_CH1 BIT(20)
+#define VCO_CTRL11 AVPLL_CTRL(11)
+#define VCO_CTRL12 AVPLL_CTRL(12)
+#define VCO_CTRL13 AVPLL_CTRL(13)
+#define VCO_CTRL14 AVPLL_CTRL(14)
+#define VCO_CTRL15 AVPLL_CTRL(15)
+#define VCO_SYNC1n(x) AVPLL_CTRL(15 + (x))
+#define VCO_SYNC1_MASK 0x1ffff
+#define VCO_SYNC2n(x) AVPLL_CTRL(23 + (x))
+#define VCO_SYNC2_MASK 0x1ffff
+#define VCO_CTRL30 AVPLL_CTRL(30)
+#define VCO_DPLL_CH1_ENABLE BIT(17)
+
+struct berlin2_avpll_vco {
+ struct clk_hw hw;
+ void __iomem *base;
+ u8 flags;
+};
+
+#define to_avpll_vco(hw) container_of(hw, struct berlin2_avpll_vco, hw)
+
+static int berlin2_avpll_vco_is_enabled(struct clk_hw *hw)
+{
+ struct berlin2_avpll_vco *vco = to_avpll_vco(hw);
+ u32 reg;
+
+ reg = readl_relaxed(vco->base + VCO_CTRL0);
+ if (vco->flags & BERLIN2_AVPLL_BIT_QUIRK)
+ reg >>= 4;
+
+ return !!(reg & VCO_POWERUP);
+}
+
+static int berlin2_avpll_vco_enable(struct clk_hw *hw)
+{
+ struct berlin2_avpll_vco *vco = to_avpll_vco(hw);
+ u32 reg;
+
+ reg = readl_relaxed(vco->base + VCO_CTRL0);
+ if (vco->flags & BERLIN2_AVPLL_BIT_QUIRK)
+ reg |= VCO_POWERUP << 4;
+ else
+ reg |= VCO_POWERUP;
+ writel_relaxed(reg, vco->base + VCO_CTRL0);
+
+ return 0;
+}
+
+static void berlin2_avpll_vco_disable(struct clk_hw *hw)
+{
+ struct berlin2_avpll_vco *vco = to_avpll_vco(hw);
+ u32 reg;
+
+ reg = readl_relaxed(vco->base + VCO_CTRL0);
+ if (vco->flags & BERLIN2_AVPLL_BIT_QUIRK)
+ reg &= ~(VCO_POWERUP << 4);
+ else
+ reg &= ~VCO_POWERUP;
+ writel_relaxed(reg, vco->base + VCO_CTRL0);
+}
+
+static u8 vco_refdiv[] = { 1, 2, 4, 3 };
+
+static unsigned long
+berlin2_avpll_vco_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct berlin2_avpll_vco *vco = to_avpll_vco(hw);
+ u32 reg, refdiv, fbdiv;
+ u64 freq = parent_rate;
+
+ /* AVPLL VCO frequency: Fvco = (Fref / refdiv) * fbdiv */
+ reg = readl_relaxed(vco->base + VCO_CTRL1);
+ refdiv = (reg & VCO_REFDIV_MASK) >> VCO_REFDIV_SHIFT;
+ refdiv = vco_refdiv[refdiv];
+ fbdiv = (reg & VCO_FBDIV_MASK) >> VCO_FBDIV_SHIFT;
+ freq *= fbdiv;
+ do_div(freq, refdiv);
+
+ return (unsigned long)freq;
+}
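To make the VCO formula above concrete, here is a minimal standalone arithmetic sketch of the same computation. The register field values (fbdiv = 54, refdiv code = 0) and the 25 MHz reference are hypothetical and are not taken from the patch or from real hardware:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	static const uint8_t vco_refdiv[] = { 1, 2, 4, 3 };
	uint32_t ctrl1 = (54u << 6) | 0u;	/* hypothetical VCO_CTRL1: fbdiv = 54, refdiv code = 0 */
	uint64_t fref = 25000000;		/* assumed reference clock, 25 MHz */
	uint32_t refdiv = vco_refdiv[ctrl1 & 0x3f];
	uint32_t fbdiv = (ctrl1 >> 6) & 0xff;
	uint64_t fvco = fref * fbdiv / refdiv;	/* Fvco = (Fref / refdiv) * fbdiv */

	printf("Fvco = %llu Hz\n", (unsigned long long)fvco);	/* 1350000000 Hz */
	return 0;
}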
+
+static const struct clk_ops berlin2_avpll_vco_ops = {
+ .is_enabled = berlin2_avpll_vco_is_enabled,
+ .enable = berlin2_avpll_vco_enable,
+ .disable = berlin2_avpll_vco_disable,
+ .recalc_rate = berlin2_avpll_vco_recalc_rate,
+};
+
+struct clk * __init berlin2_avpll_vco_register(void __iomem *base,
+ const char *name, const char *parent_name,
+ u8 vco_flags, unsigned long flags)
+{
+ struct berlin2_avpll_vco *vco;
+ struct clk_init_data init;
+
+ vco = kzalloc(sizeof(*vco), GFP_KERNEL);
+ if (!vco)
+ return ERR_PTR(-ENOMEM);
+
+ vco->base = base;
+ vco->flags = vco_flags;
+ vco->hw.init = &init;
+ init.name = name;
+ init.ops = &berlin2_avpll_vco_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = flags;
+
+ return clk_register(NULL, &vco->hw);
+}
+
+struct berlin2_avpll_channel {
+ struct clk_hw hw;
+ void __iomem *base;
+ u8 flags;
+ u8 index;
+};
+
+#define to_avpll_channel(hw) container_of(hw, struct berlin2_avpll_channel, hw)
+
+static int berlin2_avpll_channel_is_enabled(struct clk_hw *hw)
+{
+ struct berlin2_avpll_channel *ch = to_avpll_channel(hw);
+ u32 reg;
+
+ if (ch->index == 7)
+ return 1;
+
+ reg = readl_relaxed(ch->base + VCO_CTRL10);
+ reg &= VCO_POWERUP_CH1 << ch->index;
+
+ return !!reg;
+}
+
+static int berlin2_avpll_channel_enable(struct clk_hw *hw)
+{
+ struct berlin2_avpll_channel *ch = to_avpll_channel(hw);
+ u32 reg;
+
+ reg = readl_relaxed(ch->base + VCO_CTRL10);
+ reg |= VCO_POWERUP_CH1 << ch->index;
+ writel_relaxed(reg, ch->base + VCO_CTRL10);
+
+ return 0;
+}
+
+static void berlin2_avpll_channel_disable(struct clk_hw *hw)
+{
+ struct berlin2_avpll_channel *ch = to_avpll_channel(hw);
+ u32 reg;
+
+ reg = readl_relaxed(ch->base + VCO_CTRL10);
+ reg &= ~(VCO_POWERUP_CH1 << ch->index);
+ writel_relaxed(reg, ch->base + VCO_CTRL10);
+}
+
+static const u8 div_hdmi[] = { 1, 2, 4, 6 };
+static const u8 div_av1[] = { 1, 2, 5, 5 };
+
+static unsigned long
+berlin2_avpll_channel_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct berlin2_avpll_channel *ch = to_avpll_channel(hw);
+ u32 reg, div_av2, div_av3, divider = 1;
+ u64 freq = parent_rate;
+
+ reg = readl_relaxed(ch->base + VCO_CTRL30);
+ if ((reg & (VCO_DPLL_CH1_ENABLE << ch->index)) == 0)
+ goto skip_div;
+
+ /*
+ * Fch = (Fref * sync2) /
+ * (sync1 * div_hdmi * div_av1 * div_av2 * div_av3)
+ */
+
+ reg = readl_relaxed(ch->base + VCO_SYNC1n(ch->index));
+ /* On BG2/BG2CD, the SYNC1 value of AVPLL_B channel 1 is shifted by 4 bits */
+ if (ch->flags & BERLIN2_AVPLL_BIT_QUIRK && ch->index == 0)
+ reg >>= 4;
+ divider = reg & VCO_SYNC1_MASK;
+
+ reg = readl_relaxed(ch->base + VCO_SYNC2n(ch->index));
+ freq *= reg & VCO_SYNC2_MASK;
+
+ /* Channel 8 has no dividers */
+ if (ch->index == 7)
+ goto skip_div;
+
+ /*
+ * The HDMI divider fields start at VCO_CTRL11, bit 7; each field is
+ * 3 bits wide, the MSB enables the divider and the lower 2 bits
+ * select it.
+ */
+ reg = readl_relaxed(ch->base + VCO_CTRL11) >> 7;
+ reg = (reg >> (ch->index * 3));
+ if (reg & BIT(2))
+ divider *= div_hdmi[reg & 0x3];
+
+ /*
+ * The AV1 divider fields start at VCO_CTRL11, bit 28; each field is
+ * 3 bits wide, the MSB enables the divider and the lower 2 bits
+ * select it.
+ */
+ if (ch->index == 0) {
+ reg = readl_relaxed(ch->base + VCO_CTRL11);
+ reg >>= 28;
+ } else {
+ reg = readl_relaxed(ch->base + VCO_CTRL12);
+ reg >>= (ch->index-1) * 3;
+ }
+ if (reg & BIT(2))
+ divider *= div_av1[reg & 0x3];
+
+ /*
+ * The AV2 divider fields start at VCO_CTRL12, bit 18; each field is
+ * 7 bits wide and zero is not a valid value.
+ */
+ if (ch->index < 2) {
+ reg = readl_relaxed(ch->base + VCO_CTRL12);
+ reg >>= 18 + (ch->index * 7);
+ } else if (ch->index < 7) {
+ reg = readl_relaxed(ch->base + VCO_CTRL13);
+ reg >>= (ch->index - 2) * 7;
+ } else {
+ reg = readl_relaxed(ch->base + VCO_CTRL14);
+ }
+ div_av2 = reg & 0x7f;
+ if (div_av2)
+ divider *= div_av2;
+
+ /*
+ * The AV3 divider fields start at VCO_CTRL14, bit 7; each field is
+ * 4 bits wide. AV2/AV3 form a fractional divider, where only specific
+ * values for AV3 are allowed: AV3 != 0 divides by AV2/2, AV3 == 0
+ * bypasses AV3.
+ */
+ if (ch->index < 6) {
+ reg = readl_relaxed(ch->base + VCO_CTRL14);
+ reg >>= 7 + (ch->index * 4);
+ } else {
+ reg = readl_relaxed(ch->base + VCO_CTRL15);
+ }
+ div_av3 = reg & 0xf;
+ if (div_av2 && div_av3)
+ freq *= 2;
+
+skip_div:
+ do_div(freq, divider);
+ return (unsigned long)freq;
+}
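As a rough illustration of the Fch formula implemented above, here is a standalone arithmetic sketch with made-up settings (VCO at 1.35 GHz, sync1 = 20000, sync2 = 1638, HDMI/AV1 dividers disabled, AV2 = 10, AV3 = 0); all values are hypothetical and only meant to show how the terms combine:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t freq = 1350000000ull;		/* assumed VCO (parent) rate */
	uint32_t sync1 = 20000, sync2 = 1638;	/* hypothetical SYNC1/SYNC2 values */
	uint32_t av2 = 10, av3 = 0;		/* hypothetical AV2/AV3 fields */
	uint64_t divider = sync1;

	freq *= sync2;				/* numerator: Fref * sync2 */
	if (av2)
		divider *= av2;			/* AV2 divides when non-zero */
	if (av2 && av3)
		freq *= 2;			/* AV3 != 0 effectively divides by AV2/2 */

	printf("Fch = %llu Hz\n", (unsigned long long)(freq / divider));
	return 0;
}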
+
+static const struct clk_ops berlin2_avpll_channel_ops = {
+ .is_enabled = berlin2_avpll_channel_is_enabled,
+ .enable = berlin2_avpll_channel_enable,
+ .disable = berlin2_avpll_channel_disable,
+ .recalc_rate = berlin2_avpll_channel_recalc_rate,
+};
+
+/*
+ * Another nice quirk:
+ * On some production SoCs, AVPLL channels are scrambled with respect
+ * to the channel numbering in the registers but still referenced by
+ * their original channel numbers. We deal with this using a quirk flag
+ * and an index translation table.
+ */
+static const u8 quirk_index[] __initconst = { 0, 6, 5, 4, 3, 2, 1, 7 };
+
+struct clk * __init berlin2_avpll_channel_register(void __iomem *base,
+ const char *name, u8 index, const char *parent_name,
+ u8 ch_flags, unsigned long flags)
+{
+ struct berlin2_avpll_channel *ch;
+ struct clk_init_data init;
+
+ ch = kzalloc(sizeof(*ch), GFP_KERNEL);
+ if (!ch)
+ return ERR_PTR(-ENOMEM);
+
+ ch->base = base;
+ if (ch_flags & BERLIN2_AVPLL_SCRAMBLE_QUIRK)
+ ch->index = quirk_index[index];
+ else
+ ch->index = index;
+
+ ch->flags = ch_flags;
+ ch->hw.init = &init;
+ init.name = name;
+ init.ops = &berlin2_avpll_channel_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = flags;
+
+ return clk_register(NULL, &ch->hw);
+}
diff --git a/drivers/clk/berlin/berlin2-avpll.h b/drivers/clk/berlin/berlin2-avpll.h
new file mode 100644
index 00000000000..a37f5068d29
--- /dev/null
+++ b/drivers/clk/berlin/berlin2-avpll.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2014 Marvell Technology Group Ltd.
+ *
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ * Alexandre Belloni <alexandre.belloni@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __BERLIN2_AVPLL_H
+#define __BERLIN2_AVPLL_H
+
+struct clk;
+
+#define BERLIN2_AVPLL_BIT_QUIRK BIT(0)
+#define BERLIN2_AVPLL_SCRAMBLE_QUIRK BIT(1)
+
+struct clk * __init
+berlin2_avpll_vco_register(void __iomem *base, const char *name,
+ const char *parent_name, u8 vco_flags, unsigned long flags);
+
+struct clk * __init
+berlin2_avpll_channel_register(void __iomem *base, const char *name,
+ u8 index, const char *parent_name, u8 ch_flags,
+ unsigned long flags);
+
+#endif /* __BERLIN2_AVPLL_H */
diff --git a/drivers/clk/berlin/berlin2-div.c b/drivers/clk/berlin/berlin2-div.c
new file mode 100644
index 00000000000..81ff97f8aa0
--- /dev/null
+++ b/drivers/clk/berlin/berlin2-div.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2014 Marvell Technology Group Ltd.
+ *
+ * Alexandre Belloni <alexandre.belloni@free-electrons.com>
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/bitops.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "berlin2-div.h"
+
+/*
+ * Clock dividers in Berlin2 SoCs comprise a complex cell to select
+ * input pll and divider. The virtual structure as it is used in Marvell
+ * BSP code can be seen as:
+ *
+ *                      +---+
+ * pll0 --------------->| 0 |                   +---+
+ *           +---+      |(B)|--+--------------->| 0 |      +---+
+ * pll1.0 -->| 0 |  +-->| 1 |  |   +--------+   |(E)|----->| 0 |   +---+
+ * pll1.1 -->| 1 |  |   +---+  +-->|(C) 1:M |-->| 1 |      |(F)|-->|(G)|->
+ * ...    -->|(A)|--+          |   +--------+   +---+  +-->| 1 |   +---+
+ * ...    -->|   |             +-->|(D) 1:3 |----------+  +---+
+ * pll1.N -->| N |                 +--------+
+ *           +---+
+ *
+ * (A) input pll clock mux controlled by <PllSelect[1:n]>
+ * (B) input pll bypass mux controlled by <PllSwitch>
+ * (C) programmable clock divider controlled by <Select[1:n]>
+ * (D) constant div-by-3 clock divider
+ * (E) programmable clock divider bypass controlled by <Switch>
+ * (F) constant div-by-3 clock mux controlled by <D3Switch>
+ * (G) clock gate controlled by <Enable>
+ *
+ * For whatever reason, the above control signals come in two flavors:
+ * - single register dividers with all bits in one register
+ * - shared register dividers with bits spread over multiple registers
+ * (including signals for the same cell spread over consecutive registers)
+ *
+ * Also, the clock gate and pll mux are not available on every div cell,
+ * so we have to deal with that, too. We reuse the common clock composite
+ * driver to handle these combinations.
+ */
+
+#define PLL_SELECT_MASK 0x7
+#define DIV_SELECT_MASK 0x7
+
+struct berlin2_div {
+ struct clk_hw hw;
+ void __iomem *base;
+ struct berlin2_div_map map;
+ spinlock_t *lock;
+};
+
+#define to_berlin2_div(hw) container_of(hw, struct berlin2_div, hw)
+
+static u8 clk_div[] = { 1, 2, 4, 6, 8, 12, 1, 1 };
+
+static int berlin2_div_is_enabled(struct clk_hw *hw)
+{
+ struct berlin2_div *div = to_berlin2_div(hw);
+ struct berlin2_div_map *map = &div->map;
+ u32 reg;
+
+ if (div->lock)
+ spin_lock(div->lock);
+
+ reg = readl_relaxed(div->base + map->gate_offs);
+ reg >>= map->gate_shift;
+
+ if (div->lock)
+ spin_unlock(div->lock);
+
+ return (reg & 0x1);
+}
+
+static int berlin2_div_enable(struct clk_hw *hw)
+{
+ struct berlin2_div *div = to_berlin2_div(hw);
+ struct berlin2_div_map *map = &div->map;
+ u32 reg;
+
+ if (div->lock)
+ spin_lock(div->lock);
+
+ reg = readl_relaxed(div->base + map->gate_offs);
+ reg |= BIT(map->gate_shift);
+ writel_relaxed(reg, div->base + map->gate_offs);
+
+ if (div->lock)
+ spin_unlock(div->lock);
+
+ return 0;
+}
+
+static void berlin2_div_disable(struct clk_hw *hw)
+{
+ struct berlin2_div *div = to_berlin2_div(hw);
+ struct berlin2_div_map *map = &div->map;
+ u32 reg;
+
+ if (div->lock)
+ spin_lock(div->lock);
+
+ reg = readl_relaxed(div->base + map->gate_offs);
+ reg &= ~BIT(map->gate_shift);
+ writel_relaxed(reg, div->base + map->gate_offs);
+
+ if (div->lock)
+ spin_unlock(div->lock);
+}
+
+static int berlin2_div_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct berlin2_div *div = to_berlin2_div(hw);
+ struct berlin2_div_map *map = &div->map;
+ u32 reg;
+
+ if (div->lock)
+ spin_lock(div->lock);
+
+ /* index == 0 is PLL_SWITCH */
+ reg = readl_relaxed(div->base + map->pll_switch_offs);
+ if (index == 0)
+ reg &= ~BIT(map->pll_switch_shift);
+ else
+ reg |= BIT(map->pll_switch_shift);
+ writel_relaxed(reg, div->base + map->pll_switch_offs);
+
+ /* index > 0 is PLL_SELECT */
+ if (index > 0) {
+ reg = readl_relaxed(div->base + map->pll_select_offs);
+ reg &= ~(PLL_SELECT_MASK << map->pll_select_shift);
+ reg |= (index - 1) << map->pll_select_shift;
+ writel_relaxed(reg, div->base + map->pll_select_offs);
+ }
+
+ if (div->lock)
+ spin_unlock(div->lock);
+
+ return 0;
+}
+
+static u8 berlin2_div_get_parent(struct clk_hw *hw)
+{
+ struct berlin2_div *div = to_berlin2_div(hw);
+ struct berlin2_div_map *map = &div->map;
+ u32 reg;
+ u8 index = 0;
+
+ if (div->lock)
+ spin_lock(div->lock);
+
+ /* PLL_SWITCH == 0 is index 0 */
+ reg = readl_relaxed(div->base + map->pll_switch_offs);
+ reg &= BIT(map->pll_switch_shift);
+ if (reg) {
+ reg = readl_relaxed(div->base + map->pll_select_offs);
+ reg >>= map->pll_select_shift;
+ reg &= PLL_SELECT_MASK;
+ index = 1 + reg;
+ }
+
+ if (div->lock)
+ spin_unlock(div->lock);
+
+ return index;
+}
+
+static unsigned long berlin2_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct berlin2_div *div = to_berlin2_div(hw);
+ struct berlin2_div_map *map = &div->map;
+ u32 divsw, div3sw, divider = 1;
+
+ if (div->lock)
+ spin_lock(div->lock);
+
+ divsw = readl_relaxed(div->base + map->div_switch_offs) &
+ (1 << map->div_switch_shift);
+ div3sw = readl_relaxed(div->base + map->div3_switch_offs) &
+ (1 << map->div3_switch_shift);
+
+ /* constant divide-by-3 (dominant) */
+ if (div3sw != 0) {
+ divider = 3;
+ /* divider can be bypassed with DIV_SWITCH == 0 */
+ } else if (divsw == 0) {
+ divider = 1;
+ /* clock divider determined by DIV_SELECT */
+ } else {
+ u32 reg;
+ reg = readl_relaxed(div->base + map->div_select_offs);
+ reg >>= map->div_select_shift;
+ reg &= DIV_SELECT_MASK;
+ divider = clk_div[reg];
+ }
+
+ if (div->lock)
+ spin_unlock(div->lock);
+
+ return parent_rate / divider;
+}
+
+static const struct clk_ops berlin2_div_rate_ops = {
+ .recalc_rate = berlin2_div_recalc_rate,
+};
+
+static const struct clk_ops berlin2_div_gate_ops = {
+ .is_enabled = berlin2_div_is_enabled,
+ .enable = berlin2_div_enable,
+ .disable = berlin2_div_disable,
+};
+
+static const struct clk_ops berlin2_div_mux_ops = {
+ .set_parent = berlin2_div_set_parent,
+ .get_parent = berlin2_div_get_parent,
+};
+
+struct clk * __init
+berlin2_div_register(const struct berlin2_div_map *map,
+ void __iomem *base, const char *name, u8 div_flags,
+ const char **parent_names, int num_parents,
+ unsigned long flags, spinlock_t *lock)
+{
+ const struct clk_ops *mux_ops = &berlin2_div_mux_ops;
+ const struct clk_ops *rate_ops = &berlin2_div_rate_ops;
+ const struct clk_ops *gate_ops = &berlin2_div_gate_ops;
+ struct berlin2_div *div;
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ /* copy div_map to allow __initconst */
+ memcpy(&div->map, map, sizeof(*map));
+ div->base = base;
+ div->lock = lock;
+
+ if ((div_flags & BERLIN2_DIV_HAS_GATE) == 0)
+ gate_ops = NULL;
+ if ((div_flags & BERLIN2_DIV_HAS_MUX) == 0)
+ mux_ops = NULL;
+
+ return clk_register_composite(NULL, name, parent_names, num_parents,
+ &div->hw, mux_ops, &div->hw, rate_ops,
+ &div->hw, gate_ops, flags);
+}
diff --git a/drivers/clk/berlin/berlin2-div.h b/drivers/clk/berlin/berlin2-div.h
new file mode 100644
index 00000000000..15e3384f311
--- /dev/null
+++ b/drivers/clk/berlin/berlin2-div.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2014 Marvell Technology Group Ltd.
+ *
+ * Alexandre Belloni <alexandre.belloni@free-electrons.com>
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __BERLIN2_DIV_H
+#define __BERLIN2_DIV_H
+
+struct clk;
+
+#define BERLIN2_DIV_HAS_GATE BIT(0)
+#define BERLIN2_DIV_HAS_MUX BIT(1)
+
+#define BERLIN2_PLL_SELECT(_off, _sh) \
+ .pll_select_offs = _off, \
+ .pll_select_shift = _sh
+
+#define BERLIN2_PLL_SWITCH(_off, _sh) \
+ .pll_switch_offs = _off, \
+ .pll_switch_shift = _sh
+
+#define BERLIN2_DIV_SELECT(_off, _sh) \
+ .div_select_offs = _off, \
+ .div_select_shift = _sh
+
+#define BERLIN2_DIV_SWITCH(_off, _sh) \
+ .div_switch_offs = _off, \
+ .div_switch_shift = _sh
+
+#define BERLIN2_DIV_D3SWITCH(_off, _sh) \
+ .div3_switch_offs = _off, \
+ .div3_switch_shift = _sh
+
+#define BERLIN2_DIV_GATE(_off, _sh) \
+ .gate_offs = _off, \
+ .gate_shift = _sh
+
+#define BERLIN2_SINGLE_DIV(_off) \
+ BERLIN2_DIV_GATE(_off, 0), \
+ BERLIN2_PLL_SELECT(_off, 1), \
+ BERLIN2_PLL_SWITCH(_off, 4), \
+ BERLIN2_DIV_SWITCH(_off, 5), \
+ BERLIN2_DIV_D3SWITCH(_off, 6), \
+ BERLIN2_DIV_SELECT(_off, 7)
+
+struct berlin2_div_map {
+ u16 pll_select_offs;
+ u16 pll_switch_offs;
+ u16 div_select_offs;
+ u16 div_switch_offs;
+ u16 div3_switch_offs;
+ u16 gate_offs;
+ u8 pll_select_shift;
+ u8 pll_switch_shift;
+ u8 div_select_shift;
+ u8 div_switch_shift;
+ u8 div3_switch_shift;
+ u8 gate_shift;
+};
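For illustration, BERLIN2_SINGLE_DIV(REG_GFX3DCORE_CLKCTL), as used in the bg2.c tables later in this patch, expands to a map where all six control fields live in one register (0x022c) at fixed bit positions. Written out as a plain initializer (the variable name here is hypothetical) it would look roughly like this:

static const struct berlin2_div_map gfx3dcore_single_div_map = {
	.gate_offs		= 0x022c,	/* REG_GFX3DCORE_CLKCTL */
	.gate_shift		= 0,
	.pll_select_offs	= 0x022c,
	.pll_select_shift	= 1,
	.pll_switch_offs	= 0x022c,
	.pll_switch_shift	= 4,
	.div_switch_offs	= 0x022c,
	.div_switch_shift	= 5,
	.div3_switch_offs	= 0x022c,
	.div3_switch_shift	= 6,
	.div_select_offs	= 0x022c,
	.div_select_shift	= 7,
};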
+
+struct berlin2_div_data {
+ const char *name;
+ const u8 *parent_ids;
+ int num_parents;
+ unsigned long flags;
+ struct berlin2_div_map map;
+ u8 div_flags;
+};
+
+struct clk * __init
+berlin2_div_register(const struct berlin2_div_map *map,
+ void __iomem *base, const char *name, u8 div_flags,
+ const char **parent_names, int num_parents,
+ unsigned long flags, spinlock_t *lock);
+
+#endif /* __BERLIN2_DIV_H */
diff --git a/drivers/clk/berlin/berlin2-pll.c b/drivers/clk/berlin/berlin2-pll.c
new file mode 100644
index 00000000000..bdc506b0382
--- /dev/null
+++ b/drivers/clk/berlin/berlin2-pll.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2014 Marvell Technology Group Ltd.
+ *
+ * Alexandre Belloni <alexandre.belloni@free-electrons.com>
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <asm/div64.h>
+
+#include "berlin2-div.h"
+
+struct berlin2_pll_map {
+ const u8 vcodiv[16];
+ u8 mult;
+ u8 fbdiv_shift;
+ u8 rfdiv_shift;
+ u8 divsel_shift;
+};
+
+struct berlin2_pll {
+ struct clk_hw hw;
+ void __iomem *base;
+ struct berlin2_pll_map map;
+};
+
+#define to_berlin2_pll(hw) container_of(hw, struct berlin2_pll, hw)
+
+#define SPLL_CTRL0 0x00
+#define SPLL_CTRL1 0x04
+#define SPLL_CTRL2 0x08
+#define SPLL_CTRL3 0x0c
+#define SPLL_CTRL4 0x10
+
+#define FBDIV_MASK 0x1ff
+#define RFDIV_MASK 0x1f
+#define DIVSEL_MASK 0xf
+
+/*
+ * The output frequency formula for the pll is:
+ * clkout = (parent * fbdiv) / (rfdiv * vcodiv)
+ * where the code below additionally scales by map->mult to allow
+ * fractional vcodiv values in the per-SoC tables.
+ */
+static unsigned long
+berlin2_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct berlin2_pll *pll = to_berlin2_pll(hw);
+ struct berlin2_pll_map *map = &pll->map;
+ u32 val, fbdiv, rfdiv, vcodivsel, vcodiv;
+ u64 rate = parent_rate;
+
+ val = readl_relaxed(pll->base + SPLL_CTRL0);
+ fbdiv = (val >> map->fbdiv_shift) & FBDIV_MASK;
+ rfdiv = (val >> map->rfdiv_shift) & RFDIV_MASK;
+ if (rfdiv == 0) {
+ pr_warn("%s has zero rfdiv\n", __clk_get_name(hw->clk));
+ rfdiv = 1;
+ }
+
+ val = readl_relaxed(pll->base + SPLL_CTRL1);
+ vcodivsel = (val >> map->divsel_shift) & DIVSEL_MASK;
+ vcodiv = map->vcodiv[vcodivsel];
+ if (vcodiv == 0) {
+ pr_warn("%s has zero vcodiv (index %d)\n",
+ __clk_get_name(hw->clk), vcodivsel);
+ vcodiv = 1;
+ }
+
+ rate *= fbdiv * map->mult;
+ do_div(rate, rfdiv * vcodiv);
+
+ return (unsigned long)rate;
+}
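To show how the formula and map->mult interact, here is a standalone arithmetic sketch using the BG2 map values defined later in this patch (mult = 10, vcodiv[0] = 10, i.e. an effective VCO divider of 1) together with hypothetical field values (fbdiv = 32, rfdiv = 2) and an assumed 25 MHz parent:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t parent = 25000000;		/* assumed parent rate */
	uint32_t fbdiv = 32, rfdiv = 2;		/* hypothetical register fields */
	uint32_t mult = 10, vcodiv = 10;	/* BG2 map: mult and vcodiv[0] */
	uint64_t rate = parent * fbdiv * mult / (rfdiv * vcodiv);

	printf("clkout = %llu Hz\n", (unsigned long long)rate);	/* 400 MHz */
	return 0;
}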
+
+static const struct clk_ops berlin2_pll_ops = {
+ .recalc_rate = berlin2_pll_recalc_rate,
+};
+
+struct clk * __init
+berlin2_pll_register(const struct berlin2_pll_map *map,
+ void __iomem *base, const char *name,
+ const char *parent_name, unsigned long flags)
+{
+ struct clk_init_data init;
+ struct berlin2_pll *pll;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ /* copy pll_map to allow __initconst */
+ memcpy(&pll->map, map, sizeof(*map));
+ pll->base = base;
+ pll->hw.init = &init;
+ init.name = name;
+ init.ops = &berlin2_pll_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = flags;
+
+ return clk_register(NULL, &pll->hw);
+}
diff --git a/drivers/clk/berlin/berlin2-pll.h b/drivers/clk/berlin/berlin2-pll.h
new file mode 100644
index 00000000000..8831ce27ac1
--- /dev/null
+++ b/drivers/clk/berlin/berlin2-pll.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2014 Marvell Technology Group Ltd.
+ *
+ * Alexandre Belloni <alexandre.belloni@free-electrons.com>
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __BERLIN2_PLL_H
+#define __BERLIN2_PLL_H
+
+struct clk;
+
+struct berlin2_pll_map {
+ const u8 vcodiv[16];
+ u8 mult;
+ u8 fbdiv_shift;
+ u8 rfdiv_shift;
+ u8 divsel_shift;
+};
+
+struct clk * __init
+berlin2_pll_register(const struct berlin2_pll_map *map,
+ void __iomem *base, const char *name,
+ const char *parent_name, unsigned long flags);
+
+#endif /* __BERLIN2_PLL_H */
diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c
new file mode 100644
index 00000000000..515fb133495
--- /dev/null
+++ b/drivers/clk/berlin/bg2.c
@@ -0,0 +1,691 @@
+/*
+ * Copyright (c) 2014 Marvell Technology Group Ltd.
+ *
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ * Alexandre Belloni <alexandre.belloni@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/berlin2.h>
+
+#include "berlin2-avpll.h"
+#include "berlin2-div.h"
+#include "berlin2-pll.h"
+#include "common.h"
+
+#define REG_PINMUX0 0x0000
+#define REG_PINMUX1 0x0004
+#define REG_SYSPLLCTL0 0x0014
+#define REG_SYSPLLCTL4 0x0024
+#define REG_MEMPLLCTL0 0x0028
+#define REG_MEMPLLCTL4 0x0038
+#define REG_CPUPLLCTL0 0x003c
+#define REG_CPUPLLCTL4 0x004c
+#define REG_AVPLLCTL0 0x0050
+#define REG_AVPLLCTL31 0x00cc
+#define REG_AVPLLCTL62 0x0148
+#define REG_PLLSTATUS 0x014c
+#define REG_CLKENABLE 0x0150
+#define REG_CLKSELECT0 0x0154
+#define REG_CLKSELECT1 0x0158
+#define REG_CLKSELECT2 0x015c
+#define REG_CLKSELECT3 0x0160
+#define REG_CLKSWITCH0 0x0164
+#define REG_CLKSWITCH1 0x0168
+#define REG_RESET_TRIGGER 0x0178
+#define REG_RESET_STATUS0 0x017c
+#define REG_RESET_STATUS1 0x0180
+#define REG_SW_GENERIC0 0x0184
+#define REG_SW_GENERIC3 0x0190
+#define REG_PRODUCTID 0x01cc
+#define REG_PRODUCTID_EXT 0x01d0
+#define REG_GFX3DCORE_CLKCTL 0x022c
+#define REG_GFX3DSYS_CLKCTL 0x0230
+#define REG_ARC_CLKCTL 0x0234
+#define REG_VIP_CLKCTL 0x0238
+#define REG_SDIO0XIN_CLKCTL 0x023c
+#define REG_SDIO1XIN_CLKCTL 0x0240
+#define REG_GFX3DEXTRA_CLKCTL 0x0244
+#define REG_GFX3D_RESET 0x0248
+#define REG_GC360_CLKCTL 0x024c
+#define REG_SDIO_DLLMST_CLKCTL 0x0250
+
+/*
+ * BG2/BG2CD SoCs have the following audio/video I/O units:
+ *
+ * audiohd: HDMI TX audio
+ * audio0: 7.1ch TX
+ * audio1: 2ch TX
+ * audio2: 2ch RX
+ * audio3: SPDIF TX
+ * video0: HDMI video
+ * video1: Secondary video
+ * video2: SD auxiliary video
+ *
+ * There are no external audio clocks (ACLKI0, ACLKI1) and
+ * only one external video clock (VCLKI0).
+ *
+ * Currently missing bits and pieces:
+ * - audio_fast_pll is unknown
+ * - audiohd_pll is unknown
+ * - video0_pll is unknown
+ * - audio[023], audiohd parent pll is assumed to be audio_fast_pll
+ *
+ */
+
+#define MAX_CLKS 41
+static struct clk *clks[MAX_CLKS];
+static struct clk_onecell_data clk_data;
+static DEFINE_SPINLOCK(lock);
+static void __iomem *gbase;
+
+enum {
+ REFCLK, VIDEO_EXT0,
+ SYSPLL, MEMPLL, CPUPLL,
+ AVPLL_A1, AVPLL_A2, AVPLL_A3, AVPLL_A4,
+ AVPLL_A5, AVPLL_A6, AVPLL_A7, AVPLL_A8,
+ AVPLL_B1, AVPLL_B2, AVPLL_B3, AVPLL_B4,
+ AVPLL_B5, AVPLL_B6, AVPLL_B7, AVPLL_B8,
+ AUDIO1_PLL, AUDIO_FAST_PLL,
+ VIDEO0_PLL, VIDEO0_IN,
+ VIDEO1_PLL, VIDEO1_IN,
+ VIDEO2_PLL, VIDEO2_IN,
+};
+
+static const char *clk_names[] = {
+ [REFCLK] = "refclk",
+ [VIDEO_EXT0] = "video_ext0",
+ [SYSPLL] = "syspll",
+ [MEMPLL] = "mempll",
+ [CPUPLL] = "cpupll",
+ [AVPLL_A1] = "avpll_a1",
+ [AVPLL_A2] = "avpll_a2",
+ [AVPLL_A3] = "avpll_a3",
+ [AVPLL_A4] = "avpll_a4",
+ [AVPLL_A5] = "avpll_a5",
+ [AVPLL_A6] = "avpll_a6",
+ [AVPLL_A7] = "avpll_a7",
+ [AVPLL_A8] = "avpll_a8",
+ [AVPLL_B1] = "avpll_b1",
+ [AVPLL_B2] = "avpll_b2",
+ [AVPLL_B3] = "avpll_b3",
+ [AVPLL_B4] = "avpll_b4",
+ [AVPLL_B5] = "avpll_b5",
+ [AVPLL_B6] = "avpll_b6",
+ [AVPLL_B7] = "avpll_b7",
+ [AVPLL_B8] = "avpll_b8",
+ [AUDIO1_PLL] = "audio1_pll",
+ [AUDIO_FAST_PLL] = "audio_fast_pll",
+ [VIDEO0_PLL] = "video0_pll",
+ [VIDEO0_IN] = "video0_in",
+ [VIDEO1_PLL] = "video1_pll",
+ [VIDEO1_IN] = "video1_in",
+ [VIDEO2_PLL] = "video2_pll",
+ [VIDEO2_IN] = "video2_in",
+};
+
+static const struct berlin2_pll_map bg2_pll_map __initconst = {
+ .vcodiv = {10, 15, 20, 25, 30, 40, 50, 60, 80},
+ .mult = 10,
+ .fbdiv_shift = 6,
+ .rfdiv_shift = 1,
+ .divsel_shift = 7,
+};
+
+static const u8 default_parent_ids[] = {
+ SYSPLL, AVPLL_B4, AVPLL_A5, AVPLL_B6, AVPLL_B7, SYSPLL
+};
+
+static const struct berlin2_div_data bg2_divs[] __initconst = {
+ {
+ .name = "sys",
+ .parent_ids = (const u8 []){
+ SYSPLL, AVPLL_B4, AVPLL_B5, AVPLL_B6, AVPLL_B7, SYSPLL
+ },
+ .num_parents = 6,
+ .map = {
+ BERLIN2_DIV_GATE(REG_CLKENABLE, 0),
+ BERLIN2_PLL_SELECT(REG_CLKSELECT0, 0),
+ BERLIN2_DIV_SELECT(REG_CLKSELECT0, 3),
+ BERLIN2_PLL_SWITCH(REG_CLKSWITCH0, 3),
+ BERLIN2_DIV_SWITCH(REG_CLKSWITCH0, 4),
+ BERLIN2_DIV_D3SWITCH(REG_CLKSWITCH0, 5),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ {
+ .name = "cpu",
+ .parent_ids = (const u8 []){
+ CPUPLL, MEMPLL, MEMPLL, MEMPLL, MEMPLL
+ },
+ .num_parents = 5,
+ .map = {
+ BERLIN2_PLL_SELECT(REG_CLKSELECT0, 6),
+ BERLIN2_DIV_SELECT(REG_CLKSELECT0, 9),
+ BERLIN2_PLL_SWITCH(REG_CLKSWITCH0, 6),
+ BERLIN2_DIV_SWITCH(REG_CLKSWITCH0, 7),
+ BERLIN2_DIV_D3SWITCH(REG_CLKSWITCH0, 8),
+ },
+ .div_flags = BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "drmfigo",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_DIV_GATE(REG_CLKENABLE, 16),
+ BERLIN2_PLL_SELECT(REG_CLKSELECT0, 17),
+ BERLIN2_DIV_SELECT(REG_CLKSELECT0, 20),
+ BERLIN2_PLL_SWITCH(REG_CLKSWITCH0, 12),
+ BERLIN2_DIV_SWITCH(REG_CLKSWITCH0, 13),
+ BERLIN2_DIV_D3SWITCH(REG_CLKSWITCH0, 14),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "cfg",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_DIV_GATE(REG_CLKENABLE, 1),
+ BERLIN2_PLL_SELECT(REG_CLKSELECT0, 23),
+ BERLIN2_DIV_SELECT(REG_CLKSELECT0, 26),
+ BERLIN2_PLL_SWITCH(REG_CLKSWITCH0, 15),
+ BERLIN2_DIV_SWITCH(REG_CLKSWITCH0, 16),
+ BERLIN2_DIV_D3SWITCH(REG_CLKSWITCH0, 17),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "gfx",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_DIV_GATE(REG_CLKENABLE, 4),
+ BERLIN2_PLL_SELECT(REG_CLKSELECT0, 29),
+ BERLIN2_DIV_SELECT(REG_CLKSELECT1, 0),
+ BERLIN2_PLL_SWITCH(REG_CLKSWITCH0, 18),
+ BERLIN2_DIV_SWITCH(REG_CLKSWITCH0, 19),
+ BERLIN2_DIV_D3SWITCH(REG_CLKSWITCH0, 20),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "zsp",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_DIV_GATE(REG_CLKENABLE, 5),
+ BERLIN2_PLL_SELECT(REG_CLKSELECT1, 3),
+ BERLIN2_DIV_SELECT(REG_CLKSELECT1, 6),
+ BERLIN2_PLL_SWITCH(REG_CLKSWITCH0, 21),
+ BERLIN2_DIV_SWITCH(REG_CLKSWITCH0, 22),
+ BERLIN2_DIV_D3SWITCH(REG_CLKSWITCH0, 23),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "perif",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_DIV_GATE(REG_CLKENABLE, 6),
+ BERLIN2_PLL_SELECT(REG_CLKSELECT1, 9),
+ BERLIN2_DIV_SELECT(REG_CLKSELECT1, 12),
+ BERLIN2_PLL_SWITCH(REG_CLKSWITCH0, 24),
+ BERLIN2_DIV_SWITCH(REG_CLKSWITCH0, 25),
+ BERLIN2_DIV_D3SWITCH(REG_CLKSWITCH0, 26),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ {
+ .name = "pcube",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_DIV_GATE(REG_CLKENABLE, 2),
+ BERLIN2_PLL_SELECT(REG_CLKSELECT1, 15),
+ BERLIN2_DIV_SELECT(REG_CLKSELECT1, 18),
+ BERLIN2_PLL_SWITCH(REG_CLKSWITCH0, 27),
+ BERLIN2_DIV_SWITCH(REG_CLKSWITCH0, 28),
+ BERLIN2_DIV_D3SWITCH(REG_CLKSWITCH0, 29),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "vscope",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_DIV_GATE(REG_CLKENABLE, 3),
+ BERLIN2_PLL_SELECT(REG_CLKSELECT1, 21),
+ BERLIN2_DIV_SELECT(REG_CLKSELECT1, 24),
+ BERLIN2_PLL_SWITCH(REG_CLKSWITCH0, 30),
+ BERLIN2_DIV_SWITCH(REG_CLKSWITCH0, 31),
+ BERLIN2_DIV_D3SWITCH(REG_CLKSWITCH1, 0),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "nfc_ecc",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_DIV_GATE(REG_CLKENABLE, 18),
+ BERLIN2_PLL_SELECT(REG_CLKSELECT1, 27),
+ BERLIN2_DIV_SELECT(REG_CLKSELECT2, 0),
+ BERLIN2_PLL_SWITCH(REG_CLKSWITCH1, 1),
+ BERLIN2_DIV_SWITCH(REG_CLKSWITCH1, 2),
+ BERLIN2_DIV_D3SWITCH(REG_CLKSWITCH1, 3),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "vpp",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_DIV_GATE(REG_CLKENABLE, 21),
+ BERLIN2_PLL_SELECT(REG_CLKSELECT2, 3),
+ BERLIN2_DIV_SELECT(REG_CLKSELECT2, 6),
+ BERLIN2_PLL_SWITCH(REG_CLKSWITCH1, 4),
+ BERLIN2_DIV_SWITCH(REG_CLKSWITCH1, 5),
+ BERLIN2_DIV_D3SWITCH(REG_CLKSWITCH1, 6),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "app",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_DIV_GATE(REG_CLKENABLE, 20),
+ BERLIN2_PLL_SELECT(REG_CLKSELECT2, 9),
+ BERLIN2_DIV_SELECT(REG_CLKSELECT2, 12),
+ BERLIN2_PLL_SWITCH(REG_CLKSWITCH1, 7),
+ BERLIN2_DIV_SWITCH(REG_CLKSWITCH1, 8),
+ BERLIN2_DIV_D3SWITCH(REG_CLKSWITCH1, 9),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "audio0",
+ .parent_ids = (const u8 []){ AUDIO_FAST_PLL },
+ .num_parents = 1,
+ .map = {
+ BERLIN2_DIV_GATE(REG_CLKENABLE, 22),
+ BERLIN2_DIV_SELECT(REG_CLKSELECT2, 17),
+ BERLIN2_DIV_SWITCH(REG_CLKSWITCH1, 10),
+ BERLIN2_DIV_D3SWITCH(REG_CLKSWITCH1, 11),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE,
+ .flags = 0,
+ },
+ {
+ .name = "audio2",
+ .parent_ids = (const u8 []){ AUDIO_FAST_PLL },
+ .num_parents = 1,
+ .map = {
+ BERLIN2_DIV_GATE(REG_CLKENABLE, 24),
+ BERLIN2_DIV_SELECT(REG_CLKSELECT2, 20),
+ BERLIN2_DIV_SWITCH(REG_CLKSWITCH1, 14),
+ BERLIN2_DIV_D3SWITCH(REG_CLKSWITCH1, 15),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE,
+ .flags = 0,
+ },
+ {
+ .name = "audio3",
+ .parent_ids = (const u8 []){ AUDIO_FAST_PLL },
+ .num_parents = 1,
+ .map = {
+ BERLIN2_DIV_GATE(REG_CLKENABLE, 25),
+ BERLIN2_DIV_SELECT(REG_CLKSELECT2, 23),
+ BERLIN2_DIV_SWITCH(REG_CLKSWITCH1, 16),
+ BERLIN2_DIV_D3SWITCH(REG_CLKSWITCH1, 17),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE,
+ .flags = 0,
+ },
+ {
+ .name = "audio1",
+ .parent_ids = (const u8 []){ AUDIO1_PLL },
+ .num_parents = 1,
+ .map = {
+ BERLIN2_DIV_GATE(REG_CLKENABLE, 23),
+ BERLIN2_DIV_SELECT(REG_CLKSELECT3, 0),
+ BERLIN2_DIV_SWITCH(REG_CLKSWITCH1, 12),
+ BERLIN2_DIV_D3SWITCH(REG_CLKSWITCH1, 13),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE,
+ .flags = 0,
+ },
+ {
+ .name = "gfx3d_core",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_SINGLE_DIV(REG_GFX3DCORE_CLKCTL),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "gfx3d_sys",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_SINGLE_DIV(REG_GFX3DSYS_CLKCTL),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "arc",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_SINGLE_DIV(REG_ARC_CLKCTL),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "vip",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_SINGLE_DIV(REG_VIP_CLKCTL),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "sdio0xin",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_SINGLE_DIV(REG_SDIO0XIN_CLKCTL),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "sdio1xin",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_SINGLE_DIV(REG_SDIO1XIN_CLKCTL),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "gfx3d_extra",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_SINGLE_DIV(REG_GFX3DEXTRA_CLKCTL),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "gc360",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_SINGLE_DIV(REG_GC360_CLKCTL),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "sdio_dllmst",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_SINGLE_DIV(REG_SDIO_DLLMST_CLKCTL),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+};
+
+static const struct berlin2_gate_data bg2_gates[] __initconst = {
+ { "geth0", "perif", 7 },
+ { "geth1", "perif", 8 },
+ { "sata", "perif", 9 },
+ { "ahbapb", "perif", 10, CLK_IGNORE_UNUSED },
+ { "usb0", "perif", 11 },
+ { "usb1", "perif", 12 },
+ { "pbridge", "perif", 13, CLK_IGNORE_UNUSED },
+ { "sdio0", "perif", 14, CLK_IGNORE_UNUSED },
+ { "sdio1", "perif", 15, CLK_IGNORE_UNUSED },
+ { "nfc", "perif", 17 },
+ { "smemc", "perif", 19 },
+ { "audiohd", "audiohd_pll", 26 },
+ { "video0", "video0_in", 27 },
+ { "video1", "video1_in", 28 },
+ { "video2", "video2_in", 29 },
+};
+
+static void __init berlin2_clock_setup(struct device_node *np)
+{
+ const char *parent_names[9];
+ struct clk *clk;
+ u8 avpll_flags = 0;
+ int n;
+
+ gbase = of_iomap(np, 0);
+ if (!gbase)
+ return;
+
+ /* overwrite default clock names with DT provided ones */
+ clk = of_clk_get_by_name(np, clk_names[REFCLK]);
+ if (!IS_ERR(clk)) {
+ clk_names[REFCLK] = __clk_get_name(clk);
+ clk_put(clk);
+ }
+
+ clk = of_clk_get_by_name(np, clk_names[VIDEO_EXT0]);
+ if (!IS_ERR(clk)) {
+ clk_names[VIDEO_EXT0] = __clk_get_name(clk);
+ clk_put(clk);
+ }
+
+ /* simple register PLLs */
+ clk = berlin2_pll_register(&bg2_pll_map, gbase + REG_SYSPLLCTL0,
+ clk_names[SYSPLL], clk_names[REFCLK], 0);
+ if (IS_ERR(clk))
+ goto bg2_fail;
+
+ clk = berlin2_pll_register(&bg2_pll_map, gbase + REG_MEMPLLCTL0,
+ clk_names[MEMPLL], clk_names[REFCLK], 0);
+ if (IS_ERR(clk))
+ goto bg2_fail;
+
+ clk = berlin2_pll_register(&bg2_pll_map, gbase + REG_CPUPLLCTL0,
+ clk_names[CPUPLL], clk_names[REFCLK], 0);
+ if (IS_ERR(clk))
+ goto bg2_fail;
+
+ if (of_device_is_compatible(np, "marvell,berlin2-global-register"))
+ avpll_flags |= BERLIN2_AVPLL_SCRAMBLE_QUIRK;
+
+ /* audio/video VCOs */
+ clk = berlin2_avpll_vco_register(gbase + REG_AVPLLCTL0, "avpll_vcoA",
+ clk_names[REFCLK], avpll_flags, 0);
+ if (IS_ERR(clk))
+ goto bg2_fail;
+
+ for (n = 0; n < 8; n++) {
+ clk = berlin2_avpll_channel_register(gbase + REG_AVPLLCTL0,
+ clk_names[AVPLL_A1 + n], n, "avpll_vcoA",
+ avpll_flags, 0);
+ if (IS_ERR(clk))
+ goto bg2_fail;
+ }
+
+ clk = berlin2_avpll_vco_register(gbase + REG_AVPLLCTL31, "avpll_vcoB",
+ clk_names[REFCLK], BERLIN2_AVPLL_BIT_QUIRK |
+ avpll_flags, 0);
+ if (IS_ERR(clk))
+ goto bg2_fail;
+
+ for (n = 0; n < 8; n++) {
+ clk = berlin2_avpll_channel_register(gbase + REG_AVPLLCTL31,
+ clk_names[AVPLL_B1 + n], n, "avpll_vcoB",
+ BERLIN2_AVPLL_BIT_QUIRK | avpll_flags, 0);
+ if (IS_ERR(clk))
+ goto bg2_fail;
+ }
+
+ /* reference clock bypass switches */
+ parent_names[0] = clk_names[SYSPLL];
+ parent_names[1] = clk_names[REFCLK];
+ clk = clk_register_mux(NULL, "syspll_byp", parent_names, 2,
+ 0, gbase + REG_CLKSWITCH0, 0, 1, 0, &lock);
+ if (IS_ERR(clk))
+ goto bg2_fail;
+ clk_names[SYSPLL] = __clk_get_name(clk);
+
+ parent_names[0] = clk_names[MEMPLL];
+ parent_names[1] = clk_names[REFCLK];
+ clk = clk_register_mux(NULL, "mempll_byp", parent_names, 2,
+ 0, gbase + REG_CLKSWITCH0, 1, 1, 0, &lock);
+ if (IS_ERR(clk))
+ goto bg2_fail;
+ clk_names[MEMPLL] = __clk_get_name(clk);
+
+ parent_names[0] = clk_names[CPUPLL];
+ parent_names[1] = clk_names[REFCLK];
+ clk = clk_register_mux(NULL, "cpupll_byp", parent_names, 2,
+ 0, gbase + REG_CLKSWITCH0, 2, 1, 0, &lock);
+ if (IS_ERR(clk))
+ goto bg2_fail;
+ clk_names[CPUPLL] = __clk_get_name(clk);
+
+ /* clock muxes */
+ parent_names[0] = clk_names[AVPLL_B3];
+ parent_names[1] = clk_names[AVPLL_A3];
+ clk = clk_register_mux(NULL, clk_names[AUDIO1_PLL], parent_names, 2,
+ 0, gbase + REG_CLKSELECT2, 29, 1, 0, &lock);
+ if (IS_ERR(clk))
+ goto bg2_fail;
+
+ parent_names[0] = clk_names[VIDEO0_PLL];
+ parent_names[1] = clk_names[VIDEO_EXT0];
+ clk = clk_register_mux(NULL, clk_names[VIDEO0_IN], parent_names, 2,
+ 0, gbase + REG_CLKSELECT3, 4, 1, 0, &lock);
+ if (IS_ERR(clk))
+ goto bg2_fail;
+
+ parent_names[0] = clk_names[VIDEO1_PLL];
+ parent_names[1] = clk_names[VIDEO_EXT0];
+ clk = clk_register_mux(NULL, clk_names[VIDEO1_IN], parent_names, 2,
+ 0, gbase + REG_CLKSELECT3, 6, 1, 0, &lock);
+ if (IS_ERR(clk))
+ goto bg2_fail;
+
+ parent_names[0] = clk_names[AVPLL_A2];
+ parent_names[1] = clk_names[AVPLL_B2];
+ clk = clk_register_mux(NULL, clk_names[VIDEO1_PLL], parent_names, 2,
+ 0, gbase + REG_CLKSELECT3, 7, 1, 0, &lock);
+ if (IS_ERR(clk))
+ goto bg2_fail;
+
+ parent_names[0] = clk_names[VIDEO2_PLL];
+ parent_names[1] = clk_names[VIDEO_EXT0];
+ clk = clk_register_mux(NULL, clk_names[VIDEO2_IN], parent_names, 2,
+ 0, gbase + REG_CLKSELECT3, 9, 1, 0, &lock);
+ if (IS_ERR(clk))
+ goto bg2_fail;
+
+ parent_names[0] = clk_names[AVPLL_B1];
+ parent_names[1] = clk_names[AVPLL_A5];
+ clk = clk_register_mux(NULL, clk_names[VIDEO2_PLL], parent_names, 2,
+ 0, gbase + REG_CLKSELECT3, 10, 1, 0, &lock);
+ if (IS_ERR(clk))
+ goto bg2_fail;
+
+ /* clock divider cells */
+ for (n = 0; n < ARRAY_SIZE(bg2_divs); n++) {
+ const struct berlin2_div_data *dd = &bg2_divs[n];
+ int k;
+
+ for (k = 0; k < dd->num_parents; k++)
+ parent_names[k] = clk_names[dd->parent_ids[k]];
+
+ clks[CLKID_SYS + n] = berlin2_div_register(&dd->map, gbase,
+ dd->name, dd->div_flags, parent_names,
+ dd->num_parents, dd->flags, &lock);
+ }
+
+ /* clock gate cells */
+ for (n = 0; n < ARRAY_SIZE(bg2_gates); n++) {
+ const struct berlin2_gate_data *gd = &bg2_gates[n];
+
+ clks[CLKID_GETH0 + n] = clk_register_gate(NULL, gd->name,
+ gd->parent_name, gd->flags, gbase + REG_CLKENABLE,
+ gd->bit_idx, 0, &lock);
+ }
+
+ /* twdclk is derived from cpu/3 */
+ clks[CLKID_TWD] =
+ clk_register_fixed_factor(NULL, "twd", "cpu", 0, 1, 3);
+
+ /* check for errors on leaf clocks */
+ for (n = 0; n < MAX_CLKS; n++) {
+ if (!IS_ERR(clks[n]))
+ continue;
+
+ pr_err("%s: Unable to register leaf clock %d\n",
+ np->full_name, n);
+ goto bg2_fail;
+ }
+
+ /* register clk-provider */
+ clk_data.clks = clks;
+ clk_data.clk_num = MAX_CLKS;
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+
+ return;
+
+bg2_fail:
+ iounmap(gbase);
+}
+CLK_OF_DECLARE(berlin2_clock, "marvell,berlin2-chip-ctrl",
+ berlin2_clock_setup);
+CLK_OF_DECLARE(berlin2cd_clock, "marvell,berlin2cd-chip-ctrl",
+ berlin2_clock_setup);
diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c
new file mode 100644
index 00000000000..21784e4eb3f
--- /dev/null
+++ b/drivers/clk/berlin/bg2q.c
@@ -0,0 +1,389 @@
+/*
+ * Copyright (c) 2014 Marvell Technology Group Ltd.
+ *
+ * Alexandre Belloni <alexandre.belloni@free-electrons.com>
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/berlin2q.h>
+
+#include "berlin2-div.h"
+#include "berlin2-pll.h"
+#include "common.h"
+
+#define REG_PINMUX0 0x0018
+#define REG_PINMUX5 0x002c
+#define REG_SYSPLLCTL0 0x0030
+#define REG_SYSPLLCTL4 0x0040
+#define REG_CLKENABLE 0x00e8
+#define REG_CLKSELECT0 0x00ec
+#define REG_CLKSELECT1 0x00f0
+#define REG_CLKSELECT2 0x00f4
+#define REG_CLKSWITCH0 0x00f8
+#define REG_CLKSWITCH1 0x00fc
+#define REG_SW_GENERIC0 0x0110
+#define REG_SW_GENERIC3 0x011c
+#define REG_SDIO0XIN_CLKCTL 0x0158
+#define REG_SDIO1XIN_CLKCTL 0x015c
+
+#define MAX_CLKS 27
+static struct clk *clks[MAX_CLKS];
+static struct clk_onecell_data clk_data;
+static DEFINE_SPINLOCK(lock);
+static void __iomem *gbase;
+static void __iomem *cpupll_base;
+
+enum {
+ REFCLK,
+ SYSPLL, CPUPLL,
+ AVPLL_B1, AVPLL_B2, AVPLL_B3, AVPLL_B4,
+ AVPLL_B5, AVPLL_B6, AVPLL_B7, AVPLL_B8,
+};
+
+static const char *clk_names[] = {
+ [REFCLK] = "refclk",
+ [SYSPLL] = "syspll",
+ [CPUPLL] = "cpupll",
+ [AVPLL_B1] = "avpll_b1",
+ [AVPLL_B2] = "avpll_b2",
+ [AVPLL_B3] = "avpll_b3",
+ [AVPLL_B4] = "avpll_b4",
+ [AVPLL_B5] = "avpll_b5",
+ [AVPLL_B6] = "avpll_b6",
+ [AVPLL_B7] = "avpll_b7",
+ [AVPLL_B8] = "avpll_b8",
+};
+
+static const struct berlin2_pll_map bg2q_pll_map __initconst = {
+ .vcodiv = {1, 0, 2, 0, 3, 4, 0, 6, 8},
+ .mult = 1,
+ .fbdiv_shift = 7,
+ .rfdiv_shift = 2,
+ .divsel_shift = 9,
+};
+
+static const u8 default_parent_ids[] = {
+ SYSPLL, AVPLL_B4, AVPLL_B5, AVPLL_B6, AVPLL_B7, SYSPLL
+};
+
+static const struct berlin2_div_data bg2q_divs[] __initconst = {
+ {
+ .name = "sys",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_DIV_GATE(REG_CLKENABLE, 0),
+ BERLIN2_PLL_SELECT(REG_CLKSELECT0, 0),
+ BERLIN2_DIV_SELECT(REG_CLKSELECT0, 3),
+ BERLIN2_PLL_SWITCH(REG_CLKSWITCH0, 3),
+ BERLIN2_DIV_SWITCH(REG_CLKSWITCH0, 4),
+ BERLIN2_DIV_D3SWITCH(REG_CLKSWITCH0, 5),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ {
+ .name = "drmfigo",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_DIV_GATE(REG_CLKENABLE, 17),
+ BERLIN2_PLL_SELECT(REG_CLKSELECT0, 6),
+ BERLIN2_DIV_SELECT(REG_CLKSELECT0, 9),
+ BERLIN2_PLL_SWITCH(REG_CLKSWITCH0, 6),
+ BERLIN2_DIV_SWITCH(REG_CLKSWITCH0, 7),
+ BERLIN2_DIV_D3SWITCH(REG_CLKSWITCH0, 8),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "cfg",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_DIV_GATE(REG_CLKENABLE, 1),
+ BERLIN2_PLL_SELECT(REG_CLKSELECT0, 12),
+ BERLIN2_DIV_SELECT(REG_CLKSELECT0, 15),
+ BERLIN2_PLL_SWITCH(REG_CLKSWITCH0, 9),
+ BERLIN2_DIV_SWITCH(REG_CLKSWITCH0, 10),
+ BERLIN2_DIV_D3SWITCH(REG_CLKSWITCH0, 11),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "gfx2d",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_DIV_GATE(REG_CLKENABLE, 4),
+ BERLIN2_PLL_SELECT(REG_CLKSELECT0, 18),
+ BERLIN2_DIV_SELECT(REG_CLKSELECT0, 21),
+ BERLIN2_PLL_SWITCH(REG_CLKSWITCH0, 12),
+ BERLIN2_DIV_SWITCH(REG_CLKSWITCH0, 13),
+ BERLIN2_DIV_D3SWITCH(REG_CLKSWITCH0, 14),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "zsp",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_DIV_GATE(REG_CLKENABLE, 6),
+ BERLIN2_PLL_SELECT(REG_CLKSELECT0, 24),
+ BERLIN2_DIV_SELECT(REG_CLKSELECT0, 27),
+ BERLIN2_PLL_SWITCH(REG_CLKSWITCH0, 15),
+ BERLIN2_DIV_SWITCH(REG_CLKSWITCH0, 16),
+ BERLIN2_DIV_D3SWITCH(REG_CLKSWITCH0, 17),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "perif",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_DIV_GATE(REG_CLKENABLE, 7),
+ BERLIN2_PLL_SELECT(REG_CLKSELECT1, 0),
+ BERLIN2_DIV_SELECT(REG_CLKSELECT1, 3),
+ BERLIN2_PLL_SWITCH(REG_CLKSWITCH0, 18),
+ BERLIN2_DIV_SWITCH(REG_CLKSWITCH0, 19),
+ BERLIN2_DIV_D3SWITCH(REG_CLKSWITCH0, 20),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ {
+ .name = "pcube",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_DIV_GATE(REG_CLKENABLE, 2),
+ BERLIN2_PLL_SELECT(REG_CLKSELECT1, 6),
+ BERLIN2_DIV_SELECT(REG_CLKSELECT1, 9),
+ BERLIN2_PLL_SWITCH(REG_CLKSWITCH0, 21),
+ BERLIN2_DIV_SWITCH(REG_CLKSWITCH0, 22),
+ BERLIN2_DIV_D3SWITCH(REG_CLKSWITCH0, 23),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "vscope",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_DIV_GATE(REG_CLKENABLE, 3),
+ BERLIN2_PLL_SELECT(REG_CLKSELECT1, 12),
+ BERLIN2_DIV_SELECT(REG_CLKSELECT1, 15),
+ BERLIN2_PLL_SWITCH(REG_CLKSWITCH0, 24),
+ BERLIN2_DIV_SWITCH(REG_CLKSWITCH0, 25),
+ BERLIN2_DIV_D3SWITCH(REG_CLKSWITCH0, 26),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "nfc_ecc",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_DIV_GATE(REG_CLKENABLE, 19),
+ BERLIN2_PLL_SELECT(REG_CLKSELECT1, 18),
+ BERLIN2_DIV_SELECT(REG_CLKSELECT1, 21),
+ BERLIN2_PLL_SWITCH(REG_CLKSWITCH0, 27),
+ BERLIN2_DIV_SWITCH(REG_CLKSWITCH0, 28),
+ BERLIN2_DIV_D3SWITCH(REG_CLKSWITCH0, 29),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "vpp",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_DIV_GATE(REG_CLKENABLE, 21),
+ BERLIN2_PLL_SELECT(REG_CLKSELECT1, 24),
+ BERLIN2_DIV_SELECT(REG_CLKSELECT1, 27),
+ BERLIN2_PLL_SWITCH(REG_CLKSWITCH0, 30),
+ BERLIN2_DIV_SWITCH(REG_CLKSWITCH0, 31),
+ BERLIN2_DIV_D3SWITCH(REG_CLKSWITCH1, 0),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "app",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_DIV_GATE(REG_CLKENABLE, 20),
+ BERLIN2_PLL_SELECT(REG_CLKSELECT2, 0),
+ BERLIN2_DIV_SELECT(REG_CLKSELECT2, 3),
+ BERLIN2_PLL_SWITCH(REG_CLKSWITCH1, 1),
+ BERLIN2_DIV_SWITCH(REG_CLKSWITCH1, 2),
+ BERLIN2_DIV_D3SWITCH(REG_CLKSWITCH1, 3),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "sdio0xin",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_SINGLE_DIV(REG_SDIO0XIN_CLKCTL),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+ {
+ .name = "sdio1xin",
+ .parent_ids = default_parent_ids,
+ .num_parents = ARRAY_SIZE(default_parent_ids),
+ .map = {
+ BERLIN2_SINGLE_DIV(REG_SDIO1XIN_CLKCTL),
+ },
+ .div_flags = BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
+ .flags = 0,
+ },
+};
+
+static const struct berlin2_gate_data bg2q_gates[] __initconst = {
+ { "gfx2daxi", "perif", 5 },
+ { "geth0", "perif", 8 },
+ { "sata", "perif", 9 },
+ { "ahbapb", "perif", 10, CLK_IGNORE_UNUSED },
+ { "usb0", "perif", 11 },
+ { "usb1", "perif", 12 },
+ { "usb2", "perif", 13 },
+ { "usb3", "perif", 14 },
+ { "pbridge", "perif", 15, CLK_IGNORE_UNUSED },
+ { "sdio", "perif", 16, CLK_IGNORE_UNUSED },
+ { "nfc", "perif", 18 },
+ { "smemc", "perif", 19 },
+ { "pcie", "perif", 22 },
+};
+
+static void __init berlin2q_clock_setup(struct device_node *np)
+{
+ const char *parent_names[9];
+ struct clk *clk;
+ int n;
+
+ gbase = of_iomap(np, 0);
+ if (!gbase) {
+ pr_err("%s: Unable to map global base\n", np->full_name);
+ return;
+ }
+
+ /* BG2Q CPU PLL is not part of global registers */
+ cpupll_base = of_iomap(np, 1);
+ if (!cpupll_base) {
+ pr_err("%s: Unable to map cpupll base\n", np->full_name);
+ iounmap(gbase);
+ return;
+ }
+
+ /* overwrite default clock names with DT provided ones */
+ clk = of_clk_get_by_name(np, clk_names[REFCLK]);
+ if (!IS_ERR(clk)) {
+ clk_names[REFCLK] = __clk_get_name(clk);
+ clk_put(clk);
+ }
+
+ /* simple register PLLs */
+ clk = berlin2_pll_register(&bg2q_pll_map, gbase + REG_SYSPLLCTL0,
+ clk_names[SYSPLL], clk_names[REFCLK], 0);
+ if (IS_ERR(clk))
+ goto bg2q_fail;
+
+ clk = berlin2_pll_register(&bg2q_pll_map, cpupll_base,
+ clk_names[CPUPLL], clk_names[REFCLK], 0);
+ if (IS_ERR(clk))
+ goto bg2q_fail;
+
+ /* TODO: add BG2Q AVPLL */
+
+ /*
+ * TODO: add reference clock bypass switches:
+ * memPLLSWBypass, cpuPLLSWBypass, and sysPLLSWBypass
+ */
+
+ /* clock divider cells */
+ for (n = 0; n < ARRAY_SIZE(bg2q_divs); n++) {
+ const struct berlin2_div_data *dd = &bg2q_divs[n];
+ int k;
+
+ for (k = 0; k < dd->num_parents; k++)
+ parent_names[k] = clk_names[dd->parent_ids[k]];
+
+ clks[CLKID_SYS + n] = berlin2_div_register(&dd->map, gbase,
+ dd->name, dd->div_flags, parent_names,
+ dd->num_parents, dd->flags, &lock);
+ }
+
+ /* clock gate cells */
+ for (n = 0; n < ARRAY_SIZE(bg2q_gates); n++) {
+ const struct berlin2_gate_data *gd = &bg2q_gates[n];
+
+ clks[CLKID_GFX2DAXI + n] = clk_register_gate(NULL, gd->name,
+ gd->parent_name, gd->flags, gbase + REG_CLKENABLE,
+ gd->bit_idx, 0, &lock);
+ }
+
+ /*
+ * twdclk is derived from cpu/3
+ * TODO: use cpupll for now, until a cpu clock is available
+ */
+ clks[CLKID_TWD] =
+ clk_register_fixed_factor(NULL, "twd", clk_names[CPUPLL],
+ 0, 1, 3);
+
+ /* check for errors on leaf clocks */
+ for (n = 0; n < MAX_CLKS; n++) {
+ if (!IS_ERR(clks[n]))
+ continue;
+
+ pr_err("%s: Unable to register leaf clock %d\n",
+ np->full_name, n);
+ goto bg2q_fail;
+ }
+
+ /* register clk-provider */
+ clk_data.clks = clks;
+ clk_data.clk_num = MAX_CLKS;
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+
+ return;
+
+bg2q_fail:
+ iounmap(cpupll_base);
+ iounmap(gbase);
+}
+CLK_OF_DECLARE(berlin2q_clock, "marvell,berlin2q-chip-ctrl",
+ berlin2q_clock_setup);
diff --git a/drivers/clk/berlin/common.h b/drivers/clk/berlin/common.h
new file mode 100644
index 00000000000..bc68a14c455
--- /dev/null
+++ b/drivers/clk/berlin/common.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2014 Marvell Technology Group Ltd.
+ *
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ * Alexandre Belloni <alexandre.belloni@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __BERLIN2_COMMON_H
+#define __BERLIN2_COMMON_H
+
+struct berlin2_gate_data {
+ const char *name;
+ const char *parent_name;
+ u8 bit_idx;
+ unsigned long flags;
+};
+
+#endif /* __BERLIN2_COMMON_H */
diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c
new file mode 100644
index 00000000000..1127ee46b80
--- /dev/null
+++ b/drivers/clk/clk-axi-clkgen.c
@@ -0,0 +1,557 @@
+/*
+ * AXI clkgen driver
+ *
+ * Copyright 2012-2013 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/err.h>
+
+#define AXI_CLKGEN_V1_REG_UPDATE_ENABLE 0x04
+#define AXI_CLKGEN_V1_REG_CLK_OUT1 0x08
+#define AXI_CLKGEN_V1_REG_CLK_OUT2 0x0c
+#define AXI_CLKGEN_V1_REG_CLK_DIV 0x10
+#define AXI_CLKGEN_V1_REG_CLK_FB1 0x14
+#define AXI_CLKGEN_V1_REG_CLK_FB2 0x18
+#define AXI_CLKGEN_V1_REG_LOCK1 0x1c
+#define AXI_CLKGEN_V1_REG_LOCK2 0x20
+#define AXI_CLKGEN_V1_REG_LOCK3 0x24
+#define AXI_CLKGEN_V1_REG_FILTER1 0x28
+#define AXI_CLKGEN_V1_REG_FILTER2 0x2c
+
+#define AXI_CLKGEN_V2_REG_RESET 0x40
+#define AXI_CLKGEN_V2_REG_DRP_CNTRL 0x70
+#define AXI_CLKGEN_V2_REG_DRP_STATUS 0x74
+
+#define AXI_CLKGEN_V2_RESET_MMCM_ENABLE BIT(1)
+#define AXI_CLKGEN_V2_RESET_ENABLE BIT(0)
+
+#define AXI_CLKGEN_V2_DRP_CNTRL_SEL BIT(29)
+#define AXI_CLKGEN_V2_DRP_CNTRL_READ BIT(28)
+
+#define AXI_CLKGEN_V2_DRP_STATUS_BUSY BIT(16)
+
+#define MMCM_REG_CLKOUT0_1 0x08
+#define MMCM_REG_CLKOUT0_2 0x09
+#define MMCM_REG_CLK_FB1 0x14
+#define MMCM_REG_CLK_FB2 0x15
+#define MMCM_REG_CLK_DIV 0x16
+#define MMCM_REG_LOCK1 0x18
+#define MMCM_REG_LOCK2 0x19
+#define MMCM_REG_LOCK3 0x1a
+#define MMCM_REG_FILTER1 0x4e
+#define MMCM_REG_FILTER2 0x4f
+
+struct axi_clkgen;
+
+struct axi_clkgen_mmcm_ops {
+ void (*enable)(struct axi_clkgen *axi_clkgen, bool enable);
+ int (*write)(struct axi_clkgen *axi_clkgen, unsigned int reg,
+ unsigned int val, unsigned int mask);
+ int (*read)(struct axi_clkgen *axi_clkgen, unsigned int reg,
+ unsigned int *val);
+};
+
+struct axi_clkgen {
+ void __iomem *base;
+ const struct axi_clkgen_mmcm_ops *mmcm_ops;
+ struct clk_hw clk_hw;
+};
+
+static void axi_clkgen_mmcm_enable(struct axi_clkgen *axi_clkgen,
+ bool enable)
+{
+ axi_clkgen->mmcm_ops->enable(axi_clkgen, enable);
+}
+
+static int axi_clkgen_mmcm_write(struct axi_clkgen *axi_clkgen,
+ unsigned int reg, unsigned int val, unsigned int mask)
+{
+ return axi_clkgen->mmcm_ops->write(axi_clkgen, reg, val, mask);
+}
+
+static int axi_clkgen_mmcm_read(struct axi_clkgen *axi_clkgen,
+ unsigned int reg, unsigned int *val)
+{
+ return axi_clkgen->mmcm_ops->read(axi_clkgen, reg, val);
+}
+
+static uint32_t axi_clkgen_lookup_filter(unsigned int m)
+{
+ switch (m) {
+ case 0:
+ return 0x01001990;
+ case 1:
+ return 0x01001190;
+ case 2:
+ return 0x01009890;
+ case 3:
+ return 0x01001890;
+ case 4:
+ return 0x01008890;
+ case 5 ... 8:
+ return 0x01009090;
+ case 9 ... 11:
+ return 0x01000890;
+ case 12:
+ return 0x08009090;
+ case 13 ... 22:
+ return 0x01001090;
+ case 23 ... 36:
+ return 0x01008090;
+ case 37 ... 46:
+ return 0x08001090;
+ default:
+ return 0x08008090;
+ }
+}
+
+static const uint32_t axi_clkgen_lock_table[] = {
+ 0x060603e8, 0x060603e8, 0x080803e8, 0x0b0b03e8,
+ 0x0e0e03e8, 0x111103e8, 0x131303e8, 0x161603e8,
+ 0x191903e8, 0x1c1c03e8, 0x1f1f0384, 0x1f1f0339,
+ 0x1f1f02ee, 0x1f1f02bc, 0x1f1f028a, 0x1f1f0271,
+ 0x1f1f023f, 0x1f1f0226, 0x1f1f020d, 0x1f1f01f4,
+ 0x1f1f01db, 0x1f1f01c2, 0x1f1f01a9, 0x1f1f0190,
+ 0x1f1f0190, 0x1f1f0177, 0x1f1f015e, 0x1f1f015e,
+ 0x1f1f0145, 0x1f1f0145, 0x1f1f012c, 0x1f1f012c,
+ 0x1f1f012c, 0x1f1f0113, 0x1f1f0113, 0x1f1f0113,
+};
+
+static uint32_t axi_clkgen_lookup_lock(unsigned int m)
+{
+ if (m < ARRAY_SIZE(axi_clkgen_lock_table))
+ return axi_clkgen_lock_table[m];
+ return 0x1f1f00fa;
+}
+
+static const unsigned int fpfd_min = 10000;
+static const unsigned int fpfd_max = 300000;
+static const unsigned int fvco_min = 600000;
+static const unsigned int fvco_max = 1200000;
+
+static void axi_clkgen_calc_params(unsigned long fin, unsigned long fout,
+ unsigned int *best_d, unsigned int *best_m, unsigned int *best_dout)
+{
+ unsigned long d, d_min, d_max, _d_min, _d_max;
+ unsigned long m, m_min, m_max;
+ unsigned long f, dout, best_f, fvco;
+
+ fin /= 1000;
+ fout /= 1000;
+
+ best_f = ULONG_MAX;
+ *best_d = 0;
+ *best_m = 0;
+ *best_dout = 0;
+
+ d_min = max_t(unsigned long, DIV_ROUND_UP(fin, fpfd_max), 1);
+ d_max = min_t(unsigned long, fin / fpfd_min, 80);
+
+ m_min = max_t(unsigned long, DIV_ROUND_UP(fvco_min, fin) * d_min, 1);
+ m_max = min_t(unsigned long, fvco_max * d_max / fin, 64);
+
+ for (m = m_min; m <= m_max; m++) {
+ _d_min = max(d_min, DIV_ROUND_UP(fin * m, fvco_max));
+ _d_max = min(d_max, fin * m / fvco_min);
+
+ for (d = _d_min; d <= _d_max; d++) {
+ fvco = fin * m / d;
+
+ dout = DIV_ROUND_CLOSEST(fvco, fout);
+ dout = clamp_t(unsigned long, dout, 1, 128);
+ f = fvco / dout;
+ if (abs(f - fout) < abs(best_f - fout)) {
+ best_f = f;
+ *best_d = d;
+ *best_m = m;
+ *best_dout = dout;
+ if (best_f == fout)
+ return;
+ }
+ }
+ }
+}
+
+static void axi_clkgen_calc_clk_params(unsigned int divider, unsigned int *low,
+ unsigned int *high, unsigned int *edge, unsigned int *nocount)
+{
+ if (divider == 1)
+ *nocount = 1;
+ else
+ *nocount = 0;
+
+ *high = divider / 2;
+ *edge = divider % 2;
+ *low = divider - *high;
+}
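
A worked example of the two helpers above, with input frequencies chosen for illustration: for fin = 100 MHz and fout = 75 MHz, axi_clkgen_calc_params() settles on d = 1, m = 6, dout = 8, i.e.

	fvco = fin * m / d = 100 MHz * 6 / 1 = 600 MHz   (within the 600..1200 MHz VCO range)
	fout = fvco / dout = 600 MHz / 8     = 75 MHz    (exact match, so the search stops)

axi_clkgen_calc_clk_params() then splits dout = 8 into high = 4, low = 4, edge = 0, nocount = 0, which is the form the MMCM CLKOUT register writes below expect.
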
+
+static void axi_clkgen_write(struct axi_clkgen *axi_clkgen,
+ unsigned int reg, unsigned int val)
+{
+ writel(val, axi_clkgen->base + reg);
+}
+
+static void axi_clkgen_read(struct axi_clkgen *axi_clkgen,
+ unsigned int reg, unsigned int *val)
+{
+ *val = readl(axi_clkgen->base + reg);
+}
+
+static unsigned int axi_clkgen_v1_map_mmcm_reg(unsigned int reg)
+{
+ switch (reg) {
+ case MMCM_REG_CLKOUT0_1:
+ return AXI_CLKGEN_V1_REG_CLK_OUT1;
+ case MMCM_REG_CLKOUT0_2:
+ return AXI_CLKGEN_V1_REG_CLK_OUT2;
+ case MMCM_REG_CLK_FB1:
+ return AXI_CLKGEN_V1_REG_CLK_FB1;
+ case MMCM_REG_CLK_FB2:
+ return AXI_CLKGEN_V1_REG_CLK_FB2;
+ case MMCM_REG_CLK_DIV:
+ return AXI_CLKGEN_V1_REG_CLK_DIV;
+ case MMCM_REG_LOCK1:
+ return AXI_CLKGEN_V1_REG_LOCK1;
+ case MMCM_REG_LOCK2:
+ return AXI_CLKGEN_V1_REG_LOCK2;
+ case MMCM_REG_LOCK3:
+ return AXI_CLKGEN_V1_REG_LOCK3;
+ case MMCM_REG_FILTER1:
+ return AXI_CLKGEN_V1_REG_FILTER1;
+ case MMCM_REG_FILTER2:
+ return AXI_CLKGEN_V1_REG_FILTER2;
+ default:
+ return 0;
+ }
+}
+
+static int axi_clkgen_v1_mmcm_write(struct axi_clkgen *axi_clkgen,
+ unsigned int reg, unsigned int val, unsigned int mask)
+{
+ reg = axi_clkgen_v1_map_mmcm_reg(reg);
+ if (reg == 0)
+ return -EINVAL;
+
+ axi_clkgen_write(axi_clkgen, reg, val);
+
+ return 0;
+}
+
+static int axi_clkgen_v1_mmcm_read(struct axi_clkgen *axi_clkgen,
+ unsigned int reg, unsigned int *val)
+{
+ reg = axi_clkgen_v1_map_mmcm_reg(reg);
+ if (reg == 0)
+ return -EINVAL;
+
+ axi_clkgen_read(axi_clkgen, reg, val);
+
+ return 0;
+}
+
+static void axi_clkgen_v1_mmcm_enable(struct axi_clkgen *axi_clkgen,
+ bool enable)
+{
+ axi_clkgen_write(axi_clkgen, AXI_CLKGEN_V1_REG_UPDATE_ENABLE, enable);
+}
+
+static const struct axi_clkgen_mmcm_ops axi_clkgen_v1_mmcm_ops = {
+ .write = axi_clkgen_v1_mmcm_write,
+ .read = axi_clkgen_v1_mmcm_read,
+ .enable = axi_clkgen_v1_mmcm_enable,
+};
+
+static int axi_clkgen_wait_non_busy(struct axi_clkgen *axi_clkgen)
+{
+ unsigned int timeout = 10000;
+ unsigned int val;
+
+ do {
+ axi_clkgen_read(axi_clkgen, AXI_CLKGEN_V2_REG_DRP_STATUS, &val);
+ } while ((val & AXI_CLKGEN_V2_DRP_STATUS_BUSY) && --timeout);
+
+ if (val & AXI_CLKGEN_V2_DRP_STATUS_BUSY)
+ return -EIO;
+
+ return val & 0xffff;
+}
+
+static int axi_clkgen_v2_mmcm_read(struct axi_clkgen *axi_clkgen,
+ unsigned int reg, unsigned int *val)
+{
+ unsigned int reg_val;
+ int ret;
+
+ ret = axi_clkgen_wait_non_busy(axi_clkgen);
+ if (ret < 0)
+ return ret;
+
+ reg_val = AXI_CLKGEN_V2_DRP_CNTRL_SEL | AXI_CLKGEN_V2_DRP_CNTRL_READ;
+ reg_val |= (reg << 16);
+
+ axi_clkgen_write(axi_clkgen, AXI_CLKGEN_V2_REG_DRP_CNTRL, reg_val);
+
+ ret = axi_clkgen_wait_non_busy(axi_clkgen);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+
+ return 0;
+}
+
+static int axi_clkgen_v2_mmcm_write(struct axi_clkgen *axi_clkgen,
+ unsigned int reg, unsigned int val, unsigned int mask)
+{
+ unsigned int reg_val = 0;
+ int ret;
+
+ ret = axi_clkgen_wait_non_busy(axi_clkgen);
+ if (ret < 0)
+ return ret;
+
+ if (mask != 0xffff) {
+ axi_clkgen_v2_mmcm_read(axi_clkgen, reg, &reg_val);
+ reg_val &= ~mask;
+ }
+
+ reg_val |= AXI_CLKGEN_V2_DRP_CNTRL_SEL | (reg << 16) | (val & mask);
+
+ axi_clkgen_write(axi_clkgen, AXI_CLKGEN_V2_REG_DRP_CNTRL, reg_val);
+
+ return 0;
+}
+
+static void axi_clkgen_v2_mmcm_enable(struct axi_clkgen *axi_clkgen,
+ bool enable)
+{
+ unsigned int val = AXI_CLKGEN_V2_RESET_ENABLE;
+
+ if (enable)
+ val |= AXI_CLKGEN_V2_RESET_MMCM_ENABLE;
+
+ axi_clkgen_write(axi_clkgen, AXI_CLKGEN_V2_REG_RESET, val);
+}
+
+static const struct axi_clkgen_mmcm_ops axi_clkgen_v2_mmcm_ops = {
+ .write = axi_clkgen_v2_mmcm_write,
+ .read = axi_clkgen_v2_mmcm_read,
+ .enable = axi_clkgen_v2_mmcm_enable,
+};
+
+static struct axi_clkgen *clk_hw_to_axi_clkgen(struct clk_hw *clk_hw)
+{
+ return container_of(clk_hw, struct axi_clkgen, clk_hw);
+}
+
+static int axi_clkgen_set_rate(struct clk_hw *clk_hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
+ unsigned int d, m, dout;
+ unsigned int nocount;
+ unsigned int high;
+ unsigned int edge;
+ unsigned int low;
+ uint32_t filter;
+ uint32_t lock;
+
+ if (parent_rate == 0 || rate == 0)
+ return -EINVAL;
+
+ axi_clkgen_calc_params(parent_rate, rate, &d, &m, &dout);
+
+ if (d == 0 || dout == 0 || m == 0)
+ return -EINVAL;
+
+ filter = axi_clkgen_lookup_filter(m - 1);
+ lock = axi_clkgen_lookup_lock(m - 1);
+
+ axi_clkgen_calc_clk_params(dout, &low, &high, &edge, &nocount);
+ axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLKOUT0_1,
+ (high << 6) | low, 0xefff);
+ axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLKOUT0_2,
+ (edge << 7) | (nocount << 6), 0x03ff);
+
+ axi_clkgen_calc_clk_params(d, &low, &high, &edge, &nocount);
+ axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLK_DIV,
+ (edge << 13) | (nocount << 12) | (high << 6) | low, 0x3fff);
+
+ axi_clkgen_calc_clk_params(m, &low, &high, &edge, &nocount);
+ axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLK_FB1,
+ (high << 6) | low, 0xefff);
+ axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLK_FB2,
+ (edge << 7) | (nocount << 6), 0x03ff);
+
+ axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK1, lock & 0x3ff, 0x3ff);
+ axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK2,
+ (((lock >> 16) & 0x1f) << 10) | 0x1, 0x7fff);
+ axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK3,
+ (((lock >> 24) & 0x1f) << 10) | 0x3e9, 0x7fff);
+ axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_FILTER1, filter >> 16, 0x9900);
+ axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_FILTER2, filter, 0x9900);
+
+ return 0;
+}
+
+static long axi_clkgen_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned int d, m, dout;
+
+ axi_clkgen_calc_params(*parent_rate, rate, &d, &m, &dout);
+
+ if (d == 0 || dout == 0 || m == 0)
+ return -EINVAL;
+
+ return *parent_rate / d * m / dout;
+}
+
+static unsigned long axi_clkgen_recalc_rate(struct clk_hw *clk_hw,
+ unsigned long parent_rate)
+{
+ struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
+ unsigned int d, m, dout;
+ unsigned int reg;
+ unsigned long long tmp;
+
+ axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLKOUT0_1, &reg);
+ dout = (reg & 0x3f) + ((reg >> 6) & 0x3f);
+ axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_DIV, &reg);
+ d = (reg & 0x3f) + ((reg >> 6) & 0x3f);
+ axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_FB1, &reg);
+ m = (reg & 0x3f) + ((reg >> 6) & 0x3f);
+
+ if (d == 0 || dout == 0)
+ return 0;
+
+ tmp = (unsigned long long)(parent_rate / d) * m;
+ do_div(tmp, dout);
+
+ if (tmp > ULONG_MAX)
+ return ULONG_MAX;
+
+ return tmp;
+}
+
+static int axi_clkgen_enable(struct clk_hw *clk_hw)
+{
+ struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
+
+ axi_clkgen_mmcm_enable(axi_clkgen, true);
+
+ return 0;
+}
+
+static void axi_clkgen_disable(struct clk_hw *clk_hw)
+{
+ struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
+
+ axi_clkgen_mmcm_enable(axi_clkgen, false);
+}
+
+static const struct clk_ops axi_clkgen_ops = {
+ .recalc_rate = axi_clkgen_recalc_rate,
+ .round_rate = axi_clkgen_round_rate,
+ .set_rate = axi_clkgen_set_rate,
+ .enable = axi_clkgen_enable,
+ .disable = axi_clkgen_disable,
+};
+
+static const struct of_device_id axi_clkgen_ids[] = {
+ {
+ .compatible = "adi,axi-clkgen-1.00.a",
+ .data = &axi_clkgen_v1_mmcm_ops
+ }, {
+ .compatible = "adi,axi-clkgen-2.00.a",
+ .data = &axi_clkgen_v2_mmcm_ops,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, axi_clkgen_ids);
+
+static int axi_clkgen_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *id;
+ struct axi_clkgen *axi_clkgen;
+ struct clk_init_data init;
+ const char *parent_name;
+ const char *clk_name;
+ struct resource *mem;
+ struct clk *clk;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ id = of_match_node(axi_clkgen_ids, pdev->dev.of_node);
+ if (!id)
+ return -ENODEV;
+
+ axi_clkgen = devm_kzalloc(&pdev->dev, sizeof(*axi_clkgen), GFP_KERNEL);
+ if (!axi_clkgen)
+ return -ENOMEM;
+
+ axi_clkgen->mmcm_ops = id->data;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ axi_clkgen->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(axi_clkgen->base))
+ return PTR_ERR(axi_clkgen->base);
+
+ parent_name = of_clk_get_parent_name(pdev->dev.of_node, 0);
+ if (!parent_name)
+ return -EINVAL;
+
+ clk_name = pdev->dev.of_node->name;
+ of_property_read_string(pdev->dev.of_node, "clock-output-names",
+ &clk_name);
+
+ init.name = clk_name;
+ init.ops = &axi_clkgen_ops;
+ init.flags = CLK_SET_RATE_GATE;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ axi_clkgen_mmcm_enable(axi_clkgen, false);
+
+ axi_clkgen->clk_hw.init = &init;
+ clk = devm_clk_register(&pdev->dev, &axi_clkgen->clk_hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ return of_clk_add_provider(pdev->dev.of_node, of_clk_src_simple_get,
+ clk);
+}
+
+static int axi_clkgen_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+
+ return 0;
+}
+
+static struct platform_driver axi_clkgen_driver = {
+ .driver = {
+ .name = "adi-axi-clkgen",
+ .owner = THIS_MODULE,
+ .of_match_table = axi_clkgen_ids,
+ },
+ .probe = axi_clkgen_probe,
+ .remove = axi_clkgen_remove,
+};
+module_platform_driver(axi_clkgen_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Driver for the Analog Devices' AXI clkgen pcore clock generator");
diff --git a/drivers/clk/clk-axm5516.c b/drivers/clk/clk-axm5516.c
new file mode 100644
index 00000000000..d2f1e119b45
--- /dev/null
+++ b/drivers/clk/clk-axm5516.c
@@ -0,0 +1,615 @@
+/*
+ * drivers/clk/clk-axm5516.c
+ *
+ * Provides clock implementations for the three types of clock devices on the
+ * Axxia device: PLL clocks, clock dividers and clock muxes.
+ *
+ * Copyright (C) 2014 LSI Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <dt-bindings/clock/lsi,axm5516-clks.h>
+
+
+/**
+ * struct axxia_clk - Common struct to all Axxia clocks.
+ * @hw: clk_hw for the common clk framework
+ * @regmap: Regmap for the clock control registers
+ */
+struct axxia_clk {
+ struct clk_hw hw;
+ struct regmap *regmap;
+};
+#define to_axxia_clk(_hw) container_of(_hw, struct axxia_clk, hw)
+
+/**
+ * struct axxia_pllclk - Axxia PLL generated clock.
+ * @aclk: Common struct
+ * @reg: Offset into regmap for PLL control register
+ */
+struct axxia_pllclk {
+ struct axxia_clk aclk;
+ u32 reg;
+};
+#define to_axxia_pllclk(_aclk) container_of(_aclk, struct axxia_pllclk, aclk)
+
+/**
+ * axxia_pllclk_recalc - Calculate the PLL generated clock rate given the
+ * parent clock rate.
+ */
+static unsigned long
+axxia_pllclk_recalc(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct axxia_clk *aclk = to_axxia_clk(hw);
+ struct axxia_pllclk *pll = to_axxia_pllclk(aclk);
+ unsigned long rate, fbdiv, refdiv, postdiv;
+ u32 control;
+
+ regmap_read(aclk->regmap, pll->reg, &control);
+ postdiv = ((control >> 0) & 0xf) + 1;
+ fbdiv = ((control >> 4) & 0xfff) + 3;
+ refdiv = ((control >> 16) & 0x1f) + 1;
+ rate = (parent_rate / (refdiv * postdiv)) * fbdiv;
+
+ return rate;
+}
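
A sample decode of the bit layout above (the control value and reference rate are chosen for illustration, not taken from hardware):

	control = 0x000001d1
	postdiv = ((control >> 0)  & 0xf)   + 1 = 2
	fbdiv   = ((control >> 4)  & 0xfff) + 3 = 32
	refdiv  = ((control >> 16) & 0x1f)  + 1 = 1
	rate    = (parent_rate / (refdiv * postdiv)) * fbdiv
	        = (125 MHz / 2) * 32 = 2000 MHz
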
+
+static const struct clk_ops axxia_pllclk_ops = {
+ .recalc_rate = axxia_pllclk_recalc,
+};
+
+/**
+ * struct axxia_divclk - Axxia clock divider
+ * @aclk: Common struct
+ * @reg: Offset into regmap for divider control register
+ * @shift: Bit position for divider value
+ * @width: Number of bits in divider value
+ */
+struct axxia_divclk {
+ struct axxia_clk aclk;
+ u32 reg;
+ u32 shift;
+ u32 width;
+};
+#define to_axxia_divclk(_aclk) container_of(_aclk, struct axxia_divclk, aclk)
+
+/**
+ * axxia_divclk_recalc_rate - Calculate clock divider output rate
+ */
+static unsigned long
+axxia_divclk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct axxia_clk *aclk = to_axxia_clk(hw);
+ struct axxia_divclk *divclk = to_axxia_divclk(aclk);
+ u32 ctrl, div;
+
+ regmap_read(aclk->regmap, divclk->reg, &ctrl);
+ div = 1 + ((ctrl >> divclk->shift) & ((1 << divclk->width)-1));
+
+ return parent_rate / div;
+}
+
+static const struct clk_ops axxia_divclk_ops = {
+ .recalc_rate = axxia_divclk_recalc_rate,
+};
+
+/**
+ * struct axxia_clkmux - Axxia clock mux
+ * @aclk: Common struct
+ * @reg: Offset into regmap for mux control register
+ * @shift: Bit position for selection value
+ * @width: Number of bits in selection value
+ */
+struct axxia_clkmux {
+ struct axxia_clk aclk;
+ u32 reg;
+ u32 shift;
+ u32 width;
+};
+#define to_axxia_clkmux(_aclk) container_of(_aclk, struct axxia_clkmux, aclk)
+
+/**
+ * axxia_clkmux_get_parent - Return the index of selected parent clock
+ */
+static u8 axxia_clkmux_get_parent(struct clk_hw *hw)
+{
+ struct axxia_clk *aclk = to_axxia_clk(hw);
+ struct axxia_clkmux *mux = to_axxia_clkmux(aclk);
+ u32 ctrl, parent;
+
+ regmap_read(aclk->regmap, mux->reg, &ctrl);
+ parent = (ctrl >> mux->shift) & ((1 << mux->width) - 1);
+
+ return (u8) parent;
+}
+
+static const struct clk_ops axxia_clkmux_ops = {
+ .get_parent = axxia_clkmux_get_parent,
+};
+
+
+/*
+ * PLLs
+ */
+
+static struct axxia_pllclk clk_fab_pll = {
+ .aclk.hw.init = &(struct clk_init_data){
+ .name = "clk_fab_pll",
+ .parent_names = (const char *[]){
+ "clk_ref0"
+ },
+ .num_parents = 1,
+ .ops = &axxia_pllclk_ops,
+ },
+ .reg = 0x01800,
+};
+
+static struct axxia_pllclk clk_cpu_pll = {
+ .aclk.hw.init = &(struct clk_init_data){
+ .name = "clk_cpu_pll",
+ .parent_names = (const char *[]){
+ "clk_ref0"
+ },
+ .num_parents = 1,
+ .ops = &axxia_pllclk_ops,
+ },
+ .reg = 0x02000,
+};
+
+static struct axxia_pllclk clk_sys_pll = {
+ .aclk.hw.init = &(struct clk_init_data){
+ .name = "clk_sys_pll",
+ .parent_names = (const char *[]){
+ "clk_ref0"
+ },
+ .num_parents = 1,
+ .ops = &axxia_pllclk_ops,
+ },
+ .reg = 0x02800,
+};
+
+static struct axxia_pllclk clk_sm0_pll = {
+ .aclk.hw.init = &(struct clk_init_data){
+ .name = "clk_sm0_pll",
+ .parent_names = (const char *[]){
+ "clk_ref2"
+ },
+ .num_parents = 1,
+ .ops = &axxia_pllclk_ops,
+ },
+ .reg = 0x03000,
+};
+
+static struct axxia_pllclk clk_sm1_pll = {
+ .aclk.hw.init = &(struct clk_init_data){
+ .name = "clk_sm1_pll",
+ .parent_names = (const char *[]){
+ "clk_ref1"
+ },
+ .num_parents = 1,
+ .ops = &axxia_pllclk_ops,
+ },
+ .reg = 0x03800,
+};
+
+/*
+ * Clock dividers
+ */
+
+static struct axxia_divclk clk_cpu0_div = {
+ .aclk.hw.init = &(struct clk_init_data){
+ .name = "clk_cpu0_div",
+ .parent_names = (const char *[]){
+ "clk_cpu_pll"
+ },
+ .num_parents = 1,
+ .ops = &axxia_divclk_ops,
+ },
+ .reg = 0x10008,
+ .shift = 0,
+ .width = 4,
+};
+
+static struct axxia_divclk clk_cpu1_div = {
+ .aclk.hw.init = &(struct clk_init_data){
+ .name = "clk_cpu1_div",
+ .parent_names = (const char *[]){
+ "clk_cpu_pll"
+ },
+ .num_parents = 1,
+ .ops = &axxia_divclk_ops,
+ },
+ .reg = 0x10008,
+ .shift = 4,
+ .width = 4,
+};
+
+static struct axxia_divclk clk_cpu2_div = {
+ .aclk.hw.init = &(struct clk_init_data){
+ .name = "clk_cpu2_div",
+ .parent_names = (const char *[]){
+ "clk_cpu_pll"
+ },
+ .num_parents = 1,
+ .ops = &axxia_divclk_ops,
+ },
+ .reg = 0x10008,
+ .shift = 8,
+ .width = 4,
+};
+
+static struct axxia_divclk clk_cpu3_div = {
+ .aclk.hw.init = &(struct clk_init_data){
+ .name = "clk_cpu3_div",
+ .parent_names = (const char *[]){
+ "clk_cpu_pll"
+ },
+ .num_parents = 1,
+ .ops = &axxia_divclk_ops,
+ },
+ .reg = 0x10008,
+ .shift = 12,
+ .width = 4,
+};
+
+static struct axxia_divclk clk_nrcp_div = {
+ .aclk.hw.init = &(struct clk_init_data){
+ .name = "clk_nrcp_div",
+ .parent_names = (const char *[]){
+ "clk_sys_pll"
+ },
+ .num_parents = 1,
+ .ops = &axxia_divclk_ops,
+ },
+ .reg = 0x1000c,
+ .shift = 0,
+ .width = 4,
+};
+
+static struct axxia_divclk clk_sys_div = {
+ .aclk.hw.init = &(struct clk_init_data){
+ .name = "clk_sys_div",
+ .parent_names = (const char *[]){
+ "clk_sys_pll"
+ },
+ .num_parents = 1,
+ .ops = &axxia_divclk_ops,
+ },
+ .reg = 0x1000c,
+ .shift = 4,
+ .width = 4,
+};
+
+static struct axxia_divclk clk_fab_div = {
+ .aclk.hw.init = &(struct clk_init_data){
+ .name = "clk_fab_div",
+ .parent_names = (const char *[]){
+ "clk_fab_pll"
+ },
+ .num_parents = 1,
+ .ops = &axxia_divclk_ops,
+ },
+ .reg = 0x1000c,
+ .shift = 8,
+ .width = 4,
+};
+
+static struct axxia_divclk clk_per_div = {
+ .aclk.hw.init = &(struct clk_init_data){
+ .name = "clk_per_div",
+ .parent_names = (const char *[]){
+ "clk_sm1_pll"
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_BASIC,
+ .ops = &axxia_divclk_ops,
+ },
+ .reg = 0x1000c,
+ .shift = 12,
+ .width = 4,
+};
+
+static struct axxia_divclk clk_mmc_div = {
+ .aclk.hw.init = &(struct clk_init_data){
+ .name = "clk_mmc_div",
+ .parent_names = (const char *[]){
+ "clk_sm1_pll"
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_BASIC,
+ .ops = &axxia_divclk_ops,
+ },
+ .reg = 0x1000c,
+ .shift = 16,
+ .width = 4,
+};
+
+/*
+ * Clock MUXes
+ */
+
+static struct axxia_clkmux clk_cpu0_mux = {
+ .aclk.hw.init = &(struct clk_init_data){
+ .name = "clk_cpu0",
+ .parent_names = (const char *[]){
+ "clk_ref0",
+ "clk_cpu_pll",
+ "clk_cpu0_div",
+ "clk_cpu0_div"
+ },
+ .num_parents = 4,
+ .ops = &axxia_clkmux_ops,
+ },
+ .reg = 0x10000,
+ .shift = 0,
+ .width = 2,
+};
+
+static struct axxia_clkmux clk_cpu1_mux = {
+ .aclk.hw.init = &(struct clk_init_data){
+ .name = "clk_cpu1",
+ .parent_names = (const char *[]){
+ "clk_ref0",
+ "clk_cpu_pll",
+ "clk_cpu1_div",
+ "clk_cpu1_div"
+ },
+ .num_parents = 4,
+ .ops = &axxia_clkmux_ops,
+ },
+ .reg = 0x10000,
+ .shift = 2,
+ .width = 2,
+};
+
+static struct axxia_clkmux clk_cpu2_mux = {
+ .aclk.hw.init = &(struct clk_init_data){
+ .name = "clk_cpu2",
+ .parent_names = (const char *[]){
+ "clk_ref0",
+ "clk_cpu_pll",
+ "clk_cpu2_div",
+ "clk_cpu2_div"
+ },
+ .num_parents = 4,
+ .ops = &axxia_clkmux_ops,
+ },
+ .reg = 0x10000,
+ .shift = 4,
+ .width = 2,
+};
+
+static struct axxia_clkmux clk_cpu3_mux = {
+ .aclk.hw.init = &(struct clk_init_data){
+ .name = "clk_cpu3",
+ .parent_names = (const char *[]){
+ "clk_ref0",
+ "clk_cpu_pll",
+ "clk_cpu3_div",
+ "clk_cpu3_div"
+ },
+ .num_parents = 4,
+ .ops = &axxia_clkmux_ops,
+ },
+ .reg = 0x10000,
+ .shift = 6,
+ .width = 2,
+};
+
+static struct axxia_clkmux clk_nrcp_mux = {
+ .aclk.hw.init = &(struct clk_init_data){
+ .name = "clk_nrcp",
+ .parent_names = (const char *[]){
+ "clk_ref0",
+ "clk_sys_pll",
+ "clk_nrcp_div",
+ "clk_nrcp_div"
+ },
+ .num_parents = 4,
+ .ops = &axxia_clkmux_ops,
+ },
+ .reg = 0x10004,
+ .shift = 0,
+ .width = 2,
+};
+
+static struct axxia_clkmux clk_sys_mux = {
+ .aclk.hw.init = &(struct clk_init_data){
+ .name = "clk_sys",
+ .parent_names = (const char *[]){
+ "clk_ref0",
+ "clk_sys_pll",
+ "clk_sys_div",
+ "clk_sys_div"
+ },
+ .num_parents = 4,
+ .ops = &axxia_clkmux_ops,
+ },
+ .reg = 0x10004,
+ .shift = 2,
+ .width = 2,
+};
+
+static struct axxia_clkmux clk_fab_mux = {
+ .aclk.hw.init = &(struct clk_init_data){
+ .name = "clk_fab",
+ .parent_names = (const char *[]){
+ "clk_ref0",
+ "clk_fab_pll",
+ "clk_fab_div",
+ "clk_fab_div"
+ },
+ .num_parents = 4,
+ .ops = &axxia_clkmux_ops,
+ },
+ .reg = 0x10004,
+ .shift = 4,
+ .width = 2,
+};
+
+static struct axxia_clkmux clk_per_mux = {
+ .aclk.hw.init = &(struct clk_init_data){
+ .name = "clk_per",
+ .parent_names = (const char *[]){
+ "clk_ref1",
+ "clk_per_div"
+ },
+ .num_parents = 2,
+ .ops = &axxia_clkmux_ops,
+ },
+ .reg = 0x10004,
+ .shift = 6,
+ .width = 1,
+};
+
+static struct axxia_clkmux clk_mmc_mux = {
+ .aclk.hw.init = &(struct clk_init_data){
+ .name = "clk_mmc",
+ .parent_names = (const char *[]){
+ "clk_ref1",
+ "clk_mmc_div"
+ },
+ .num_parents = 2,
+ .ops = &axxia_clkmux_ops,
+ },
+ .reg = 0x10004,
+ .shift = 9,
+ .width = 1,
+};
+
+/* Table of all supported clocks indexed by the clock identifiers from the
+ * device tree binding
+ */
+static struct axxia_clk *axmclk_clocks[] = {
+ [AXXIA_CLK_FAB_PLL] = &clk_fab_pll.aclk,
+ [AXXIA_CLK_CPU_PLL] = &clk_cpu_pll.aclk,
+ [AXXIA_CLK_SYS_PLL] = &clk_sys_pll.aclk,
+ [AXXIA_CLK_SM0_PLL] = &clk_sm0_pll.aclk,
+ [AXXIA_CLK_SM1_PLL] = &clk_sm1_pll.aclk,
+ [AXXIA_CLK_FAB_DIV] = &clk_fab_div.aclk,
+ [AXXIA_CLK_SYS_DIV] = &clk_sys_div.aclk,
+ [AXXIA_CLK_NRCP_DIV] = &clk_nrcp_div.aclk,
+ [AXXIA_CLK_CPU0_DIV] = &clk_cpu0_div.aclk,
+ [AXXIA_CLK_CPU1_DIV] = &clk_cpu1_div.aclk,
+ [AXXIA_CLK_CPU2_DIV] = &clk_cpu2_div.aclk,
+ [AXXIA_CLK_CPU3_DIV] = &clk_cpu3_div.aclk,
+ [AXXIA_CLK_PER_DIV] = &clk_per_div.aclk,
+ [AXXIA_CLK_MMC_DIV] = &clk_mmc_div.aclk,
+ [AXXIA_CLK_FAB] = &clk_fab_mux.aclk,
+ [AXXIA_CLK_SYS] = &clk_sys_mux.aclk,
+ [AXXIA_CLK_NRCP] = &clk_nrcp_mux.aclk,
+ [AXXIA_CLK_CPU0] = &clk_cpu0_mux.aclk,
+ [AXXIA_CLK_CPU1] = &clk_cpu1_mux.aclk,
+ [AXXIA_CLK_CPU2] = &clk_cpu2_mux.aclk,
+ [AXXIA_CLK_CPU3] = &clk_cpu3_mux.aclk,
+ [AXXIA_CLK_PER] = &clk_per_mux.aclk,
+ [AXXIA_CLK_MMC] = &clk_mmc_mux.aclk,
+};
+
+static const struct regmap_config axmclk_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1fffc,
+ .fast_io = true,
+};
+
+static const struct of_device_id axmclk_match_table[] = {
+ { .compatible = "lsi,axm5516-clks" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, axmclk_match_table);
+
+struct axmclk_priv {
+ struct clk_onecell_data onecell;
+ struct clk *clks[];
+};
+
+static int axmclk_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct resource *res;
+ int i, ret;
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+ struct regmap *regmap;
+ size_t num_clks;
+ struct axmclk_priv *priv;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &axmclk_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ num_clks = ARRAY_SIZE(axmclk_clocks);
+	pr_info("axmclk: supporting %zu clocks\n", num_clks);
+ priv = devm_kzalloc(dev, sizeof(*priv) + sizeof(*priv->clks) * num_clks,
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->onecell.clks = priv->clks;
+ priv->onecell.clk_num = num_clks;
+
+ /* Update each entry with the allocated regmap and register the clock
+ * with the common clock framework
+ */
+ for (i = 0; i < num_clks; i++) {
+ axmclk_clocks[i]->regmap = regmap;
+ clk = devm_clk_register(dev, &axmclk_clocks[i]->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ priv->clks[i] = clk;
+ }
+
+ ret = of_clk_add_provider(dev->of_node,
+ of_clk_src_onecell_get, &priv->onecell);
+
+ return ret;
+}
+
+static int axmclk_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ return 0;
+}
+
+static struct platform_driver axmclk_driver = {
+ .probe = axmclk_probe,
+ .remove = axmclk_remove,
+ .driver = {
+ .name = "clk-axm5516",
+ .owner = THIS_MODULE,
+ .of_match_table = axmclk_match_table,
+ },
+};
+
+static int __init axmclk_init(void)
+{
+ return platform_driver_register(&axmclk_driver);
+}
+core_initcall(axmclk_init);
+
+static void __exit axmclk_exit(void)
+{
+ platform_driver_unregister(&axmclk_driver);
+}
+module_exit(axmclk_exit);
+
+MODULE_DESCRIPTION("AXM5516 clock driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:clk-axm5516");
diff --git a/drivers/clk/clk-bcm2835.c b/drivers/clk/clk-bcm2835.c
new file mode 100644
index 00000000000..6b950ca8b71
--- /dev/null
+++ b/drivers/clk/clk-bcm2835.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2010 Broadcom
+ * Copyright (C) 2012 Stephen Warren
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/bcm2835.h>
+#include <linux/of.h>
+
+/*
+ * These are fixed clocks. They are probably not all root clocks, and it may
+ * be possible to turn them on and off, but until the clock tree is mapped
+ * out better this is the only way they can be used.
+ */
+void __init bcm2835_init_clocks(void)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = clk_register_fixed_rate(NULL, "sys_pclk", NULL, CLK_IS_ROOT,
+ 250000000);
+ if (IS_ERR(clk))
+ pr_err("sys_pclk not registered\n");
+
+ clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT,
+ 126000000);
+ if (IS_ERR(clk))
+ pr_err("apb_pclk not registered\n");
+
+ clk = clk_register_fixed_rate(NULL, "uart0_pclk", NULL, CLK_IS_ROOT,
+ 3000000);
+ if (IS_ERR(clk))
+ pr_err("uart0_pclk not registered\n");
+ ret = clk_register_clkdev(clk, NULL, "20201000.uart");
+ if (ret)
+ pr_err("uart0_pclk alias not registered\n");
+
+ clk = clk_register_fixed_rate(NULL, "uart1_pclk", NULL, CLK_IS_ROOT,
+ 125000000);
+ if (IS_ERR(clk))
+ pr_err("uart1_pclk not registered\n");
+ ret = clk_register_clkdev(clk, NULL, "20215000.uart");
+ if (ret)
+ pr_err("uart1_pclk alias not registered\n");
+}
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
new file mode 100644
index 00000000000..57a078e06ef
--- /dev/null
+++ b/drivers/clk/clk-composite.c
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2013 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#define to_clk_composite(_hw) container_of(_hw, struct clk_composite, hw)
+
+static u8 clk_composite_get_parent(struct clk_hw *hw)
+{
+ struct clk_composite *composite = to_clk_composite(hw);
+ const struct clk_ops *mux_ops = composite->mux_ops;
+ struct clk_hw *mux_hw = composite->mux_hw;
+
+ mux_hw->clk = hw->clk;
+
+ return mux_ops->get_parent(mux_hw);
+}
+
+static int clk_composite_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_composite *composite = to_clk_composite(hw);
+ const struct clk_ops *mux_ops = composite->mux_ops;
+ struct clk_hw *mux_hw = composite->mux_hw;
+
+ mux_hw->clk = hw->clk;
+
+ return mux_ops->set_parent(mux_hw, index);
+}
+
+static unsigned long clk_composite_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_composite *composite = to_clk_composite(hw);
+ const struct clk_ops *rate_ops = composite->rate_ops;
+ struct clk_hw *rate_hw = composite->rate_hw;
+
+ rate_hw->clk = hw->clk;
+
+ return rate_ops->recalc_rate(rate_hw, parent_rate);
+}
+
+static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *best_parent_rate,
+ struct clk **best_parent_p)
+{
+ struct clk_composite *composite = to_clk_composite(hw);
+ const struct clk_ops *rate_ops = composite->rate_ops;
+ const struct clk_ops *mux_ops = composite->mux_ops;
+ struct clk_hw *rate_hw = composite->rate_hw;
+ struct clk_hw *mux_hw = composite->mux_hw;
+
+ if (rate_hw && rate_ops && rate_ops->determine_rate) {
+ rate_hw->clk = hw->clk;
+ return rate_ops->determine_rate(rate_hw, rate, best_parent_rate,
+ best_parent_p);
+ } else if (mux_hw && mux_ops && mux_ops->determine_rate) {
+ mux_hw->clk = hw->clk;
+ return mux_ops->determine_rate(mux_hw, rate, best_parent_rate,
+ best_parent_p);
+ } else {
+ pr_err("clk: clk_composite_determine_rate function called, but no mux or rate callback set!\n");
+ return 0;
+ }
+}
+
+static long clk_composite_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_composite *composite = to_clk_composite(hw);
+ const struct clk_ops *rate_ops = composite->rate_ops;
+ struct clk_hw *rate_hw = composite->rate_hw;
+
+ rate_hw->clk = hw->clk;
+
+ return rate_ops->round_rate(rate_hw, rate, prate);
+}
+
+static int clk_composite_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_composite *composite = to_clk_composite(hw);
+ const struct clk_ops *rate_ops = composite->rate_ops;
+ struct clk_hw *rate_hw = composite->rate_hw;
+
+ rate_hw->clk = hw->clk;
+
+ return rate_ops->set_rate(rate_hw, rate, parent_rate);
+}
+
+static int clk_composite_is_enabled(struct clk_hw *hw)
+{
+ struct clk_composite *composite = to_clk_composite(hw);
+ const struct clk_ops *gate_ops = composite->gate_ops;
+ struct clk_hw *gate_hw = composite->gate_hw;
+
+ gate_hw->clk = hw->clk;
+
+ return gate_ops->is_enabled(gate_hw);
+}
+
+static int clk_composite_enable(struct clk_hw *hw)
+{
+ struct clk_composite *composite = to_clk_composite(hw);
+ const struct clk_ops *gate_ops = composite->gate_ops;
+ struct clk_hw *gate_hw = composite->gate_hw;
+
+ gate_hw->clk = hw->clk;
+
+ return gate_ops->enable(gate_hw);
+}
+
+static void clk_composite_disable(struct clk_hw *hw)
+{
+ struct clk_composite *composite = to_clk_composite(hw);
+ const struct clk_ops *gate_ops = composite->gate_ops;
+ struct clk_hw *gate_hw = composite->gate_hw;
+
+ gate_hw->clk = hw->clk;
+
+ gate_ops->disable(gate_hw);
+}
+
+struct clk *clk_register_composite(struct device *dev, const char *name,
+ const char **parent_names, int num_parents,
+ struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+ struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+ struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+ unsigned long flags)
+{
+ struct clk *clk;
+ struct clk_init_data init;
+ struct clk_composite *composite;
+ struct clk_ops *clk_composite_ops;
+
+ composite = kzalloc(sizeof(*composite), GFP_KERNEL);
+ if (!composite) {
+ pr_err("%s: could not allocate composite clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.flags = flags | CLK_IS_BASIC;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ clk_composite_ops = &composite->ops;
+
+ if (mux_hw && mux_ops) {
+ if (!mux_ops->get_parent || !mux_ops->set_parent) {
+ clk = ERR_PTR(-EINVAL);
+ goto err;
+ }
+
+ composite->mux_hw = mux_hw;
+ composite->mux_ops = mux_ops;
+ clk_composite_ops->get_parent = clk_composite_get_parent;
+ clk_composite_ops->set_parent = clk_composite_set_parent;
+ if (mux_ops->determine_rate)
+ clk_composite_ops->determine_rate = clk_composite_determine_rate;
+ }
+
+ if (rate_hw && rate_ops) {
+ if (!rate_ops->recalc_rate) {
+ clk = ERR_PTR(-EINVAL);
+ goto err;
+ }
+
+ /* .round_rate is a prerequisite for .set_rate */
+ if (rate_ops->round_rate) {
+ clk_composite_ops->round_rate = clk_composite_round_rate;
+ if (rate_ops->set_rate) {
+ clk_composite_ops->set_rate = clk_composite_set_rate;
+ }
+ } else {
+ WARN(rate_ops->set_rate,
+				"%s: set_rate op requires a round_rate op\n",
+ __func__);
+ }
+
+ composite->rate_hw = rate_hw;
+ composite->rate_ops = rate_ops;
+ clk_composite_ops->recalc_rate = clk_composite_recalc_rate;
+ if (rate_ops->determine_rate)
+ clk_composite_ops->determine_rate = clk_composite_determine_rate;
+ }
+
+ if (gate_hw && gate_ops) {
+ if (!gate_ops->is_enabled || !gate_ops->enable ||
+ !gate_ops->disable) {
+ clk = ERR_PTR(-EINVAL);
+ goto err;
+ }
+
+ composite->gate_hw = gate_hw;
+ composite->gate_ops = gate_ops;
+ clk_composite_ops->is_enabled = clk_composite_is_enabled;
+ clk_composite_ops->enable = clk_composite_enable;
+ clk_composite_ops->disable = clk_composite_disable;
+ }
+
+ init.ops = clk_composite_ops;
+ composite->hw.init = &init;
+
+ clk = clk_register(dev, &composite->hw);
+ if (IS_ERR(clk))
+ goto err;
+
+ if (composite->mux_hw)
+ composite->mux_hw->clk = clk;
+
+ if (composite->rate_hw)
+ composite->rate_hw->clk = clk;
+
+ if (composite->gate_hw)
+ composite->gate_hw->clk = clk;
+
+ return clk;
+
+err:
+ kfree(composite);
+ return clk;
+}
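
A minimal sketch of a caller, assuming a hypothetical register layout and hypothetical clock and parent names (the basic clk_mux/clk_divider/clk_gate types and their ops come from linux/clk-provider.h):

#include <linux/kernel.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/err.h>

static const char *example_parents[] = { "osc", "pll_a" };

/* Mux in bit 0, 3-bit divider at bits 4..6, gate in bit 8 of one register. */
static struct clk *example_register_composite(void __iomem *reg)
{
	struct clk_mux *mux;
	struct clk_divider *div;
	struct clk_gate *gate;

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	div = kzalloc(sizeof(*div), GFP_KERNEL);
	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!mux || !div || !gate) {
		kfree(mux);
		kfree(div);
		kfree(gate);
		return ERR_PTR(-ENOMEM);
	}

	mux->reg = reg;
	mux->shift = 0;
	mux->mask = 0x1;

	div->reg = reg;
	div->shift = 4;
	div->width = 3;

	gate->reg = reg;
	gate->bit_idx = 8;

	return clk_register_composite(NULL, "example_clk", example_parents,
			ARRAY_SIZE(example_parents),
			&mux->hw, &clk_mux_ops,
			&div->hw, &clk_divider_ops,
			&gate->hw, &clk_gate_ops, 0);
}
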
diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
new file mode 100644
index 00000000000..8f571548870
--- /dev/null
+++ b/drivers/clk/clk-devres.c
@@ -0,0 +1,55 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/gfp.h>
+
+static void devm_clk_release(struct device *dev, void *res)
+{
+ clk_put(*(struct clk **)res);
+}
+
+struct clk *devm_clk_get(struct device *dev, const char *id)
+{
+ struct clk **ptr, *clk;
+
+ ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ clk = clk_get(dev, id);
+ if (!IS_ERR(clk)) {
+ *ptr = clk;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return clk;
+}
+EXPORT_SYMBOL(devm_clk_get);
+
+static int devm_clk_match(struct device *dev, void *res, void *data)
+{
+ struct clk **c = res;
+ if (!c || !*c) {
+ WARN_ON(!c || !*c);
+ return 0;
+ }
+ return *c == data;
+}
+
+void devm_clk_put(struct device *dev, struct clk *clk)
+{
+ int ret;
+
+ ret = devres_release(dev, devm_clk_release, devm_clk_match, clk);
+
+ WARN_ON(ret);
+}
+EXPORT_SYMBOL(devm_clk_put);
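
Typical consumer usage, with a hypothetical device and clock name; the reference taken by devm_clk_get() is released automatically when the device is unbound, so no clk_put() is needed on error paths:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, "bus");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/* ... set up the device using the enabled clock ... */
	return 0;
}
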
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
new file mode 100644
index 00000000000..18a9de29df0
--- /dev/null
+++ b/drivers/clk/clk-divider.c
@@ -0,0 +1,465 @@
+/*
+ * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org>
+ * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Adjustable divider clock implementation
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/log2.h>
+
+/*
+ * DOC: basic adjustable divider clock that cannot gate
+ *
+ * Traits of this clock:
+ * prepare - clk_prepare only ensures that parents are prepared
+ * enable - clk_enable only ensures that parents are enabled
+ * rate - rate is adjustable. clk->rate = DIV_ROUND_UP(parent->rate, divisor)
+ * parent - fixed parent. No clk_set_parent support
+ */
+
+#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
+
+#define div_mask(d) ((1 << ((d)->width)) - 1)
+
+static unsigned int _get_table_maxdiv(const struct clk_div_table *table)
+{
+ unsigned int maxdiv = 0;
+ const struct clk_div_table *clkt;
+
+ for (clkt = table; clkt->div; clkt++)
+ if (clkt->div > maxdiv)
+ maxdiv = clkt->div;
+ return maxdiv;
+}
+
+static unsigned int _get_table_mindiv(const struct clk_div_table *table)
+{
+ unsigned int mindiv = UINT_MAX;
+ const struct clk_div_table *clkt;
+
+ for (clkt = table; clkt->div; clkt++)
+ if (clkt->div < mindiv)
+ mindiv = clkt->div;
+ return mindiv;
+}
+
+static unsigned int _get_maxdiv(struct clk_divider *divider)
+{
+ if (divider->flags & CLK_DIVIDER_ONE_BASED)
+ return div_mask(divider);
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ return 1 << div_mask(divider);
+ if (divider->table)
+ return _get_table_maxdiv(divider->table);
+ return div_mask(divider) + 1;
+}
+
+static unsigned int _get_table_div(const struct clk_div_table *table,
+ unsigned int val)
+{
+ const struct clk_div_table *clkt;
+
+ for (clkt = table; clkt->div; clkt++)
+ if (clkt->val == val)
+ return clkt->div;
+ return 0;
+}
+
+static unsigned int _get_div(struct clk_divider *divider, unsigned int val)
+{
+ if (divider->flags & CLK_DIVIDER_ONE_BASED)
+ return val;
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ return 1 << val;
+ if (divider->table)
+ return _get_table_div(divider->table, val);
+ return val + 1;
+}
+
+static unsigned int _get_table_val(const struct clk_div_table *table,
+ unsigned int div)
+{
+ const struct clk_div_table *clkt;
+
+ for (clkt = table; clkt->div; clkt++)
+ if (clkt->div == div)
+ return clkt->val;
+ return 0;
+}
+
+static unsigned int _get_val(struct clk_divider *divider, unsigned int div)
+{
+ if (divider->flags & CLK_DIVIDER_ONE_BASED)
+ return div;
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ return __ffs(div);
+ if (divider->table)
+ return _get_table_val(divider->table, div);
+ return div - 1;
+}
+
+static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ unsigned int div, val;
+
+ val = clk_readl(divider->reg) >> divider->shift;
+ val &= div_mask(divider);
+
+ div = _get_div(divider, val);
+ if (!div) {
+ WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
+ "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
+ __clk_get_name(hw->clk));
+ return parent_rate;
+ }
+
+ return DIV_ROUND_UP(parent_rate, div);
+}
+
+/*
+ * The reverse of DIV_ROUND_UP: The maximum number which
+ * divided by m is r
+ */
+#define MULT_ROUND_UP(r, m) ((r) * (m) + (m) - 1)
+
+static bool _is_valid_table_div(const struct clk_div_table *table,
+ unsigned int div)
+{
+ const struct clk_div_table *clkt;
+
+ for (clkt = table; clkt->div; clkt++)
+ if (clkt->div == div)
+ return true;
+ return false;
+}
+
+static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
+{
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ return is_power_of_2(div);
+ if (divider->table)
+ return _is_valid_table_div(divider->table, div);
+ return true;
+}
+
+static int _round_up_table(const struct clk_div_table *table, int div)
+{
+ const struct clk_div_table *clkt;
+ int up = INT_MAX;
+
+ for (clkt = table; clkt->div; clkt++) {
+ if (clkt->div == div)
+ return clkt->div;
+ else if (clkt->div < div)
+ continue;
+
+ if ((clkt->div - div) < (up - div))
+ up = clkt->div;
+ }
+
+ return up;
+}
+
+static int _round_down_table(const struct clk_div_table *table, int div)
+{
+ const struct clk_div_table *clkt;
+ int down = _get_table_mindiv(table);
+
+ for (clkt = table; clkt->div; clkt++) {
+ if (clkt->div == div)
+ return clkt->div;
+ else if (clkt->div > div)
+ continue;
+
+ if ((div - clkt->div) < (div - down))
+ down = clkt->div;
+ }
+
+ return down;
+}
+
+static int _div_round_up(struct clk_divider *divider,
+ unsigned long parent_rate, unsigned long rate)
+{
+ int div = DIV_ROUND_UP(parent_rate, rate);
+
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ div = __roundup_pow_of_two(div);
+ if (divider->table)
+ div = _round_up_table(divider->table, div);
+
+ return div;
+}
+
+static int _div_round_closest(struct clk_divider *divider,
+ unsigned long parent_rate, unsigned long rate)
+{
+ int up, down, div;
+
+ up = down = div = DIV_ROUND_CLOSEST(parent_rate, rate);
+
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) {
+ up = __roundup_pow_of_two(div);
+ down = __rounddown_pow_of_two(div);
+ } else if (divider->table) {
+ up = _round_up_table(divider->table, div);
+ down = _round_down_table(divider->table, div);
+ }
+
+ return (up - div) <= (div - down) ? up : down;
+}
+
+static int _div_round(struct clk_divider *divider, unsigned long parent_rate,
+ unsigned long rate)
+{
+ if (divider->flags & CLK_DIVIDER_ROUND_CLOSEST)
+ return _div_round_closest(divider, parent_rate, rate);
+
+ return _div_round_up(divider, parent_rate, rate);
+}
+
+static bool _is_best_div(struct clk_divider *divider,
+ unsigned long rate, unsigned long now, unsigned long best)
+{
+ if (divider->flags & CLK_DIVIDER_ROUND_CLOSEST)
+ return abs(rate - now) < abs(rate - best);
+
+ return now <= rate && now > best;
+}
+
+static int _next_div(struct clk_divider *divider, int div)
+{
+ div++;
+
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ return __roundup_pow_of_two(div);
+ if (divider->table)
+ return _round_up_table(divider->table, div);
+
+ return div;
+}
+
+static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
+ unsigned long *best_parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ int i, bestdiv = 0;
+ unsigned long parent_rate, best = 0, now, maxdiv;
+ unsigned long parent_rate_saved = *best_parent_rate;
+
+ if (!rate)
+ rate = 1;
+
+ maxdiv = _get_maxdiv(divider);
+
+ if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
+ parent_rate = *best_parent_rate;
+ bestdiv = _div_round(divider, parent_rate, rate);
+ bestdiv = bestdiv == 0 ? 1 : bestdiv;
+ bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
+ return bestdiv;
+ }
+
+ /*
+ * The maximum divider we can use without overflowing
+ * unsigned long in rate * i below
+ */
+ maxdiv = min(ULONG_MAX / rate, maxdiv);
+
+ for (i = 1; i <= maxdiv; i = _next_div(divider, i)) {
+ if (!_is_valid_div(divider, i))
+ continue;
+ if (rate * i == parent_rate_saved) {
+ /*
+ * It's the most ideal case if the requested rate can be
+ * divided from parent clock without needing to change
+ * parent rate, so return the divider immediately.
+ */
+ *best_parent_rate = parent_rate_saved;
+ return i;
+ }
+ parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
+ MULT_ROUND_UP(rate, i));
+ now = DIV_ROUND_UP(parent_rate, i);
+ if (_is_best_div(divider, rate, now, best)) {
+ bestdiv = i;
+ best = now;
+ *best_parent_rate = parent_rate;
+ }
+ }
+
+ if (!bestdiv) {
+ bestdiv = _get_maxdiv(divider);
+ *best_parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), 1);
+ }
+
+ return bestdiv;
+}
+
+static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ int div;
+ div = clk_divider_bestdiv(hw, rate, prate);
+
+ return DIV_ROUND_UP(*prate, div);
+}
+
+static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ unsigned int div, value;
+ unsigned long flags = 0;
+ u32 val;
+
+ div = DIV_ROUND_UP(parent_rate, rate);
+
+ if (!_is_valid_div(divider, div))
+ return -EINVAL;
+
+ value = _get_val(divider, div);
+
+ if (value > div_mask(divider))
+ value = div_mask(divider);
+
+ if (divider->lock)
+ spin_lock_irqsave(divider->lock, flags);
+
+ if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
+ val = div_mask(divider) << (divider->shift + 16);
+ } else {
+ val = clk_readl(divider->reg);
+ val &= ~(div_mask(divider) << divider->shift);
+ }
+ val |= value << divider->shift;
+ clk_writel(val, divider->reg);
+
+ if (divider->lock)
+ spin_unlock_irqrestore(divider->lock, flags);
+
+ return 0;
+}
+
+const struct clk_ops clk_divider_ops = {
+ .recalc_rate = clk_divider_recalc_rate,
+ .round_rate = clk_divider_round_rate,
+ .set_rate = clk_divider_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_divider_ops);
+
+const struct clk_ops clk_divider_ro_ops = {
+ .recalc_rate = clk_divider_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
+
+static struct clk *_register_divider(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width,
+ u8 clk_divider_flags, const struct clk_div_table *table,
+ spinlock_t *lock)
+{
+ struct clk_divider *div;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
+ if (width + shift > 16) {
+ pr_warn("divider value exceeds LOWORD field\n");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /* allocate the divider */
+ div = kzalloc(sizeof(struct clk_divider), GFP_KERNEL);
+ if (!div) {
+ pr_err("%s: could not allocate divider clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
+ init.ops = &clk_divider_ro_ops;
+ else
+ init.ops = &clk_divider_ops;
+ init.flags = flags | CLK_IS_BASIC;
+	init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ /* struct clk_divider assignments */
+ div->reg = reg;
+ div->shift = shift;
+ div->width = width;
+ div->flags = clk_divider_flags;
+ div->lock = lock;
+ div->hw.init = &init;
+ div->table = table;
+
+ /* register the clock */
+ clk = clk_register(dev, &div->hw);
+
+ if (IS_ERR(clk))
+ kfree(div);
+
+ return clk;
+}
+
+/**
+ * clk_register_divider - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+struct clk *clk_register_divider(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width,
+ u8 clk_divider_flags, spinlock_t *lock)
+{
+ return _register_divider(dev, name, parent_name, flags, reg, shift,
+ width, clk_divider_flags, NULL, lock);
+}
+EXPORT_SYMBOL_GPL(clk_register_divider);
+
+/**
+ * clk_register_divider_table - register a table based divider clock with
+ * the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @table: array of divider/value pairs ending with a div set to 0
+ * @lock: shared register lock for this clock
+ */
+struct clk *clk_register_divider_table(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width,
+ u8 clk_divider_flags, const struct clk_div_table *table,
+ spinlock_t *lock)
+{
+ return _register_divider(dev, name, parent_name, flags, reg, shift,
+ width, clk_divider_flags, table, lock);
+}
+EXPORT_SYMBOL_GPL(clk_register_divider_table);
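
A sketch of a provider using the table-based variant; the register offset, bit field and divider values are hypothetical, and the table ends with a zero .div entry as the kernel-doc above requires:

#include <linux/clk-provider.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_div_lock);

static const struct clk_div_table example_div_table[] = {
	{ .val = 0, .div = 1 },
	{ .val = 1, .div = 2 },
	{ .val = 2, .div = 4 },
	{ .val = 3, .div = 8 },
	{ /* sentinel: .div = 0 terminates the table */ }
};

static struct clk *example_register_div(void __iomem *base)
{
	/* 2-bit divider field at bits 4..5 of the register at base + 0x10 */
	return clk_register_divider_table(NULL, "example_div", "example_parent",
			0, base + 0x10, 4, 2, 0,
			example_div_table, &example_div_lock);
}
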
diff --git a/drivers/clk/clk-efm32gg.c b/drivers/clk/clk-efm32gg.c
new file mode 100644
index 00000000000..bac2ddf49d0
--- /dev/null
+++ b/drivers/clk/clk-efm32gg.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2013 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include <dt-bindings/clock/efm32-cmu.h>
+
+#define CMU_HFPERCLKEN0 0x44
+
+static struct clk *clk[37];
+static struct clk_onecell_data clk_data = {
+ .clks = clk,
+ .clk_num = ARRAY_SIZE(clk),
+};
+
+static int __init efm32gg_cmu_init(struct device_node *np)
+{
+ int i;
+ void __iomem *base;
+
+ for (i = 0; i < ARRAY_SIZE(clk); ++i)
+ clk[i] = ERR_PTR(-ENOENT);
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_warn("Failed to map address range for efm32gg,cmu node\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ clk[clk_HFXO] = clk_register_fixed_rate(NULL, "HFXO", NULL,
+ CLK_IS_ROOT, 48000000);
+
+ clk[clk_HFPERCLKUSART0] = clk_register_gate(NULL, "HFPERCLK.USART0",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 0, 0, NULL);
+ clk[clk_HFPERCLKUSART1] = clk_register_gate(NULL, "HFPERCLK.USART1",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 1, 0, NULL);
+ clk[clk_HFPERCLKUSART2] = clk_register_gate(NULL, "HFPERCLK.USART2",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 2, 0, NULL);
+ clk[clk_HFPERCLKUART0] = clk_register_gate(NULL, "HFPERCLK.UART0",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 3, 0, NULL);
+ clk[clk_HFPERCLKUART1] = clk_register_gate(NULL, "HFPERCLK.UART1",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 4, 0, NULL);
+ clk[clk_HFPERCLKTIMER0] = clk_register_gate(NULL, "HFPERCLK.TIMER0",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 5, 0, NULL);
+ clk[clk_HFPERCLKTIMER1] = clk_register_gate(NULL, "HFPERCLK.TIMER1",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 6, 0, NULL);
+ clk[clk_HFPERCLKTIMER2] = clk_register_gate(NULL, "HFPERCLK.TIMER2",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 7, 0, NULL);
+ clk[clk_HFPERCLKTIMER3] = clk_register_gate(NULL, "HFPERCLK.TIMER3",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 8, 0, NULL);
+ clk[clk_HFPERCLKACMP0] = clk_register_gate(NULL, "HFPERCLK.ACMP0",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 9, 0, NULL);
+ clk[clk_HFPERCLKACMP1] = clk_register_gate(NULL, "HFPERCLK.ACMP1",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 10, 0, NULL);
+ clk[clk_HFPERCLKI2C0] = clk_register_gate(NULL, "HFPERCLK.I2C0",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 11, 0, NULL);
+ clk[clk_HFPERCLKI2C1] = clk_register_gate(NULL, "HFPERCLK.I2C1",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 12, 0, NULL);
+ clk[clk_HFPERCLKGPIO] = clk_register_gate(NULL, "HFPERCLK.GPIO",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 13, 0, NULL);
+ clk[clk_HFPERCLKVCMP] = clk_register_gate(NULL, "HFPERCLK.VCMP",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 14, 0, NULL);
+ clk[clk_HFPERCLKPRS] = clk_register_gate(NULL, "HFPERCLK.PRS",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 15, 0, NULL);
+ clk[clk_HFPERCLKADC0] = clk_register_gate(NULL, "HFPERCLK.ADC0",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 16, 0, NULL);
+ clk[clk_HFPERCLKDAC0] = clk_register_gate(NULL, "HFPERCLK.DAC0",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL);
+
+ return of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+}
+CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init);
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
new file mode 100644
index 00000000000..d9e3f671c2e
--- /dev/null
+++ b/drivers/clk/clk-fixed-factor.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Standard functionality for the common clock API.
+ */
+#include <linux/module.h>
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+
+/*
+ * DOC: basic fixed multiplier and divider clock that cannot gate
+ *
+ * Traits of this clock:
+ * prepare - clk_prepare only ensures that parents are prepared
+ * enable - clk_enable only ensures that parents are enabled
+ * rate - rate is fixed. clk->rate = parent->rate / div * mult
+ * parent - fixed parent. No clk_set_parent support
+ */
+
+#define to_clk_fixed_factor(_hw) container_of(_hw, struct clk_fixed_factor, hw)
+
+static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);
+ unsigned long long int rate;
+
+ rate = (unsigned long long int)parent_rate * fix->mult;
+ do_div(rate, fix->div);
+ return (unsigned long)rate;
+}
+
+static long clk_factor_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);
+
+ if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
+ unsigned long best_parent;
+
+ best_parent = (rate / fix->mult) * fix->div;
+ *prate = __clk_round_rate(__clk_get_parent(hw->clk),
+ best_parent);
+ }
+
+ return (*prate / fix->div) * fix->mult;
+}
+
+static int clk_factor_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return 0;
+}
+
+struct clk_ops clk_fixed_factor_ops = {
+ .round_rate = clk_factor_round_rate,
+ .set_rate = clk_factor_set_rate,
+ .recalc_rate = clk_factor_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_fixed_factor_ops);
+
+struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ unsigned int mult, unsigned int div)
+{
+ struct clk_fixed_factor *fix;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ fix = kmalloc(sizeof(*fix), GFP_KERNEL);
+ if (!fix) {
+ pr_err("%s: could not allocate fixed factor clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* struct clk_fixed_factor assignments */
+ fix->mult = mult;
+ fix->div = div;
+ fix->hw.init = &init;
+
+ init.name = name;
+ init.ops = &clk_fixed_factor_ops;
+ init.flags = flags | CLK_IS_BASIC;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clk = clk_register(dev, &fix->hw);
+
+ if (IS_ERR(clk))
+ kfree(fix);
+
+ return clk;
+}
+EXPORT_SYMBOL_GPL(clk_register_fixed_factor);
+
+#ifdef CONFIG_OF
+/**
+ * of_fixed_factor_clk_setup() - Setup function for simple fixed factor clock
+ */
+void __init of_fixed_factor_clk_setup(struct device_node *node)
+{
+ struct clk *clk;
+ const char *clk_name = node->name;
+ const char *parent_name;
+ u32 div, mult;
+
+ if (of_property_read_u32(node, "clock-div", &div)) {
+		pr_err("%s: Fixed factor clock <%s> must have a clock-div property\n",
+			__func__, node->name);
+ return;
+ }
+
+ if (of_property_read_u32(node, "clock-mult", &mult)) {
+		pr_err("%s: Fixed factor clock <%s> must have a clock-mult property\n",
+			__func__, node->name);
+ return;
+ }
+
+ of_property_read_string(node, "clock-output-names", &clk_name);
+ parent_name = of_clk_get_parent_name(node, 0);
+
+ clk = clk_register_fixed_factor(NULL, clk_name, parent_name, 0,
+ mult, div);
+ if (!IS_ERR(clk))
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+}
+EXPORT_SYMBOL_GPL(of_fixed_factor_clk_setup);
+CLK_OF_DECLARE(fixed_factor_clk, "fixed-factor-clock",
+ of_fixed_factor_clk_setup);
+#endif
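To illustrate the helper introduced above, a minimal platform-code sketch that derives a half-rate bus clock from an oscillator; the clock names "osc24m" and "bus12m" and the wrapper function are assumptions for illustration only.

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/printk.h>

static void __init board_register_bus_clk(void)
{
	struct clk *clk;

	/* bus12m = osc24m * mult / div = 24 MHz * 1 / 2 = 12 MHz, fixed */
	clk = clk_register_fixed_factor(NULL, "bus12m", "osc24m",
					0, 1, 2);
	if (IS_ERR(clk))
		pr_err("bus12m: registration failed\n");
}

The device-tree path gives the same result: a node with compatible = "fixed-factor-clock", clock-mult = <1>, clock-div = <2> and a single parent in its "clocks" property is picked up by of_fixed_factor_clk_setup() at boot.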
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
new file mode 100644
index 00000000000..0fc56ab6e84
--- /dev/null
+++ b/drivers/clk/clk-fixed-rate.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
+ * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Fixed rate clock implementation
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/of.h>
+
+/*
+ * DOC: basic fixed-rate clock that cannot gate
+ *
+ * Traits of this clock:
+ * prepare - clk_(un)prepare only ensures parents are prepared
+ * enable - clk_enable only ensures parents are enabled
+ * rate - rate is always a fixed value. No clk_set_rate support
+ * parent - fixed parent. No clk_set_parent support
+ */
+
+#define to_clk_fixed_rate(_hw) container_of(_hw, struct clk_fixed_rate, hw)
+
+static unsigned long clk_fixed_rate_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return to_clk_fixed_rate(hw)->fixed_rate;
+}
+
+static unsigned long clk_fixed_rate_recalc_accuracy(struct clk_hw *hw,
+ unsigned long parent_accuracy)
+{
+ return to_clk_fixed_rate(hw)->fixed_accuracy;
+}
+
+const struct clk_ops clk_fixed_rate_ops = {
+ .recalc_rate = clk_fixed_rate_recalc_rate,
+ .recalc_accuracy = clk_fixed_rate_recalc_accuracy,
+};
+EXPORT_SYMBOL_GPL(clk_fixed_rate_ops);
+
+/**
+ * clk_register_fixed_rate_with_accuracy - register fixed-rate clock with the
+ * clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ * @fixed_accuracy: non-adjustable clock accuracy (parts per billion)
+ */
+struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev,
+ const char *name, const char *parent_name, unsigned long flags,
+ unsigned long fixed_rate, unsigned long fixed_accuracy)
+{
+ struct clk_fixed_rate *fixed;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ /* allocate fixed-rate clock */
+ fixed = kzalloc(sizeof(struct clk_fixed_rate), GFP_KERNEL);
+ if (!fixed) {
+ pr_err("%s: could not allocate fixed clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.ops = &clk_fixed_rate_ops;
+ init.flags = flags | CLK_IS_BASIC;
+	init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ /* struct clk_fixed_rate assignments */
+ fixed->fixed_rate = fixed_rate;
+ fixed->fixed_accuracy = fixed_accuracy;
+ fixed->hw.init = &init;
+
+ /* register the clock */
+ clk = clk_register(dev, &fixed->hw);
+ if (IS_ERR(clk))
+ kfree(fixed);
+
+ return clk;
+}
+EXPORT_SYMBOL_GPL(clk_register_fixed_rate_with_accuracy);
+
+/**
+ * clk_register_fixed_rate - register fixed-rate clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ unsigned long fixed_rate)
+{
+ return clk_register_fixed_rate_with_accuracy(dev, name, parent_name,
+ flags, fixed_rate, 0);
+}
+EXPORT_SYMBOL_GPL(clk_register_fixed_rate);
+
+#ifdef CONFIG_OF
+/**
+ * of_fixed_clk_setup() - Setup function for simple fixed rate clock
+ */
+void of_fixed_clk_setup(struct device_node *node)
+{
+ struct clk *clk;
+ const char *clk_name = node->name;
+ u32 rate;
+ u32 accuracy = 0;
+
+ if (of_property_read_u32(node, "clock-frequency", &rate))
+ return;
+
+ of_property_read_u32(node, "clock-accuracy", &accuracy);
+
+ of_property_read_string(node, "clock-output-names", &clk_name);
+
+ clk = clk_register_fixed_rate_with_accuracy(NULL, clk_name, NULL,
+ CLK_IS_ROOT, rate,
+ accuracy);
+ if (!IS_ERR(clk))
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+}
+EXPORT_SYMBOL_GPL(of_fixed_clk_setup);
+CLK_OF_DECLARE(fixed_clk, "fixed-clock", of_fixed_clk_setup);
+#endif
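A short board-code sketch of the accuracy-aware registration added above; the "osc32k" name and the 20 ppm figure are illustrative assumptions.

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/printk.h>

static void __init board_register_osc32k(void)
{
	struct clk *clk;

	/* accuracy is expressed in parts per billion: 20 ppm == 20000 ppb */
	clk = clk_register_fixed_rate_with_accuracy(NULL, "osc32k", NULL,
						    CLK_IS_ROOT, 32768, 20000);
	if (IS_ERR(clk))
		pr_err("osc32k: registration failed\n");
}

In the device-tree path, a node with compatible = "fixed-clock", clock-frequency = <32768> and an optional clock-accuracy = <20000> results in the same registration via of_fixed_clk_setup().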
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
new file mode 100644