diff options
Diffstat (limited to 'drivers/clk/st')
-rw-r--r--  drivers/clk/st/Makefile      |    1 +
-rw-r--r--  drivers/clk/st/clkgen-fsyn.c | 1039 ++++++++++++++++++++++++++++++++
-rw-r--r--  drivers/clk/st/clkgen-mux.c  |  820 +++++++++++++++++++++++++
-rw-r--r--  drivers/clk/st/clkgen-pll.c  |  701 ++++++++++++++++++
-rw-r--r--  drivers/clk/st/clkgen.h      |   48 ++
5 files changed, 2609 insertions(+), 0 deletions(-)
diff --git a/drivers/clk/st/Makefile b/drivers/clk/st/Makefile new file mode 100644 index 00000000000..c7455ffdbdf --- /dev/null +++ b/drivers/clk/st/Makefile @@ -0,0 +1 @@ +obj-y += clkgen-mux.o clkgen-pll.o clkgen-fsyn.o diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c new file mode 100644 index 00000000000..4f53ee0778d --- /dev/null +++ b/drivers/clk/st/clkgen-fsyn.c @@ -0,0 +1,1039 @@ +/* + * Copyright (C) 2014 STMicroelectronics R&D Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +/* + * Authors: + * Stephen Gallimore <stephen.gallimore@st.com>, + * Pankaj Dev <pankaj.dev@st.com>. + */ + +#include <linux/slab.h> +#include <linux/of_address.h> +#include <linux/clk-provider.h> + +#include "clkgen.h" + +/* + * Maximum input clock to the PLL before we divide it down by 2 + * although in reality in actual systems this has never been seen to + * be used. 
+ */ +#define QUADFS_NDIV_THRESHOLD 30000000 + +#define PLL_BW_GOODREF   (0L) +#define PLL_BW_VBADREF   (1L) +#define PLL_BW_BADREF    (2L) +#define PLL_BW_VGOODREF  (3L) + +#define QUADFS_MAX_CHAN 4 + +struct stm_fs { +	unsigned long ndiv; +	unsigned long mdiv; +	unsigned long pe; +	unsigned long sdiv; +	unsigned long nsdiv; +}; + +static struct stm_fs fs216c65_rtbl[] = { +	{ .mdiv = 0x1f, .pe = 0x0,	.sdiv = 0x7,	.nsdiv = 0 },	/* 312.5 Khz */ +	{ .mdiv = 0x17, .pe = 0x25ed,	.sdiv = 0x1,	.nsdiv = 0 },	/* 27    MHz */ +	{ .mdiv = 0x1a, .pe = 0x7b36,	.sdiv = 0x2,	.nsdiv = 1 },	/* 36.87 MHz */ +	{ .mdiv = 0x13, .pe = 0x0,	.sdiv = 0x2,	.nsdiv = 1 },	/* 48    MHz */ +	{ .mdiv = 0x11, .pe = 0x1c72,	.sdiv = 0x1,	.nsdiv = 1 },	/* 108   MHz */ +}; + +static struct stm_fs fs432c65_rtbl[] = { +	{ .mdiv = 0x1f, .pe = 0x0,	.sdiv = 0x7,	.nsdiv = 0 },	/* 625   Khz */ +	{ .mdiv = 0x11, .pe = 0x1c72,	.sdiv = 0x2,	.nsdiv = 1 },	/* 108   MHz */ +	{ .mdiv = 0x19, .pe = 0x121a,	.sdiv = 0x0,	.nsdiv = 1 },	/* 297   MHz */ +}; + +static struct stm_fs fs660c32_rtbl[] = { +	{ .mdiv = 0x01, .pe = 0x2aaa,	.sdiv = 0x8,	.nsdiv = 0 },	/* 600   KHz */ +	{ .mdiv = 0x02, .pe = 0x3d33,	.sdiv = 0x0,	.nsdiv = 0 },	/* 148.5 Mhz */ +	{ .mdiv = 0x13, .pe = 0x5bcc,	.sdiv = 0x0,	.nsdiv = 1 },	/* 297   Mhz */ +	{ .mdiv = 0x0e, .pe = 0x1025,	.sdiv = 0x0,	.nsdiv = 1 },	/* 333   Mhz */ +	{ .mdiv = 0x0b, .pe = 0x715f,	.sdiv = 0x0,	.nsdiv = 1 },	/* 350   Mhz */ +}; + +struct clkgen_quadfs_data { +	bool reset_present; +	bool bwfilter_present; +	bool lockstatus_present; +	bool nsdiv_present; +	struct clkgen_field ndiv; +	struct clkgen_field ref_bw; +	struct clkgen_field nreset; +	struct clkgen_field npda; +	struct clkgen_field lock_status; + +	struct clkgen_field nsb[QUADFS_MAX_CHAN]; +	struct clkgen_field en[QUADFS_MAX_CHAN]; +	struct clkgen_field mdiv[QUADFS_MAX_CHAN]; +	struct clkgen_field pe[QUADFS_MAX_CHAN]; +	struct clkgen_field sdiv[QUADFS_MAX_CHAN]; +	struct clkgen_field nsdiv[QUADFS_MAX_CHAN]; + +	const 
struct clk_ops *pll_ops; +	struct stm_fs *rtbl; +	u8 rtbl_cnt; +	int  (*get_rate)(unsigned long , struct stm_fs *, +			unsigned long *); +}; + +static const struct clk_ops st_quadfs_pll_c65_ops; +static const struct clk_ops st_quadfs_pll_c32_ops; +static const struct clk_ops st_quadfs_fs216c65_ops; +static const struct clk_ops st_quadfs_fs432c65_ops; +static const struct clk_ops st_quadfs_fs660c32_ops; + +static int clk_fs216c65_get_rate(unsigned long, struct stm_fs *, +		unsigned long *); +static int clk_fs432c65_get_rate(unsigned long, struct stm_fs *, +		unsigned long *); +static int clk_fs660c32_dig_get_rate(unsigned long, struct stm_fs *, +		unsigned long *); +/* + * Values for all of the standalone instances of this clock + * generator found in STiH415 and STiH416 SYSCFG register banks. Note + * that the individual channel standby control bits (nsb) are in the + * first register along with the PLL control bits. + */ +static struct clkgen_quadfs_data st_fs216c65_416 = { +	/* 416 specific */ +	.npda	= CLKGEN_FIELD(0x0, 0x1, 14), +	.nsb	= { CLKGEN_FIELD(0x0, 0x1, 10), +		    CLKGEN_FIELD(0x0, 0x1, 11), +		    CLKGEN_FIELD(0x0, 0x1, 12), +		    CLKGEN_FIELD(0x0, 0x1, 13) }, +	.nsdiv_present = true, +	.nsdiv	= { CLKGEN_FIELD(0x0, 0x1, 18), +		    CLKGEN_FIELD(0x0, 0x1, 19), +		    CLKGEN_FIELD(0x0, 0x1, 20), +		    CLKGEN_FIELD(0x0, 0x1, 21) }, +	.mdiv	= { CLKGEN_FIELD(0x4, 0x1f, 0), +		    CLKGEN_FIELD(0x14, 0x1f, 0), +		    CLKGEN_FIELD(0x24, 0x1f, 0), +		    CLKGEN_FIELD(0x34, 0x1f, 0) }, +	.en	= { CLKGEN_FIELD(0x10, 0x1, 0), +		    CLKGEN_FIELD(0x20, 0x1, 0), +		    CLKGEN_FIELD(0x30, 0x1, 0), +		    CLKGEN_FIELD(0x40, 0x1, 0) }, +	.ndiv	= CLKGEN_FIELD(0x0, 0x1, 15), +	.bwfilter_present = true, +	.ref_bw = CLKGEN_FIELD(0x0, 0x3, 16), +	.pe	= { CLKGEN_FIELD(0x8, 0xffff, 0), +		    CLKGEN_FIELD(0x18, 0xffff, 0), +		    CLKGEN_FIELD(0x28, 0xffff, 0), +		    CLKGEN_FIELD(0x38, 0xffff, 0) }, +	.sdiv	= { CLKGEN_FIELD(0xC, 0x7, 0), +		    CLKGEN_FIELD(0x1C, 0x7, 0), 
+		    CLKGEN_FIELD(0x2C, 0x7, 0), +		    CLKGEN_FIELD(0x3C, 0x7, 0) }, +	.pll_ops	= &st_quadfs_pll_c65_ops, +	.rtbl		= fs216c65_rtbl, +	.rtbl_cnt	= ARRAY_SIZE(fs216c65_rtbl), +	.get_rate	= clk_fs216c65_get_rate, +}; + +static struct clkgen_quadfs_data st_fs432c65_416 = { +	.npda	= CLKGEN_FIELD(0x0, 0x1, 14), +	.nsb	= { CLKGEN_FIELD(0x0, 0x1, 10), +		    CLKGEN_FIELD(0x0, 0x1, 11), +		    CLKGEN_FIELD(0x0, 0x1, 12), +		    CLKGEN_FIELD(0x0, 0x1, 13) }, +	.nsdiv_present = true, +	.nsdiv	= { CLKGEN_FIELD(0x0, 0x1, 18), +		   CLKGEN_FIELD(0x0, 0x1, 19), +		   CLKGEN_FIELD(0x0, 0x1, 20), +		   CLKGEN_FIELD(0x0, 0x1, 21) }, +	.mdiv	= { CLKGEN_FIELD(0x4, 0x1f, 0), +		    CLKGEN_FIELD(0x14, 0x1f, 0), +		    CLKGEN_FIELD(0x24, 0x1f, 0), +		    CLKGEN_FIELD(0x34, 0x1f, 0) }, +	.en	= { CLKGEN_FIELD(0x10, 0x1, 0), +		    CLKGEN_FIELD(0x20, 0x1, 0), +		    CLKGEN_FIELD(0x30, 0x1, 0), +		    CLKGEN_FIELD(0x40, 0x1, 0) }, +	.ndiv	= CLKGEN_FIELD(0x0, 0x1, 15), +	.bwfilter_present = true, +	.ref_bw = CLKGEN_FIELD(0x0, 0x3, 16), +	.pe	= { CLKGEN_FIELD(0x8, 0xffff, 0), +		    CLKGEN_FIELD(0x18, 0xffff, 0), +		    CLKGEN_FIELD(0x28, 0xffff, 0), +		    CLKGEN_FIELD(0x38, 0xffff, 0) }, +	.sdiv	= { CLKGEN_FIELD(0xC, 0x7, 0), +		    CLKGEN_FIELD(0x1C, 0x7, 0), +		    CLKGEN_FIELD(0x2C, 0x7, 0), +		    CLKGEN_FIELD(0x3C, 0x7, 0) }, +	.pll_ops	= &st_quadfs_pll_c65_ops, +	.rtbl		= fs432c65_rtbl, +	.rtbl_cnt	= ARRAY_SIZE(fs432c65_rtbl), +	.get_rate	= clk_fs432c65_get_rate, +}; + +static struct clkgen_quadfs_data st_fs660c32_E_416 = { +	.npda	= CLKGEN_FIELD(0x0, 0x1, 14), +	.nsb	= { CLKGEN_FIELD(0x0, 0x1, 10), +		    CLKGEN_FIELD(0x0, 0x1, 11), +		    CLKGEN_FIELD(0x0, 0x1, 12), +		    CLKGEN_FIELD(0x0, 0x1, 13) }, +	.nsdiv_present = true, +	.nsdiv	= { CLKGEN_FIELD(0x0, 0x1, 18), +		    CLKGEN_FIELD(0x0, 0x1, 19), +		    CLKGEN_FIELD(0x0, 0x1, 20), +		    CLKGEN_FIELD(0x0, 0x1, 21) }, +	.mdiv	= { CLKGEN_FIELD(0x4, 0x1f, 0), +		    CLKGEN_FIELD(0x14, 0x1f, 0), +		    CLKGEN_FIELD(0x24, 0x1f, 
0), +		    CLKGEN_FIELD(0x34, 0x1f, 0) }, +	.en	= { CLKGEN_FIELD(0x10, 0x1, 0), +		    CLKGEN_FIELD(0x20, 0x1, 0), +		    CLKGEN_FIELD(0x30, 0x1, 0), +		    CLKGEN_FIELD(0x40, 0x1, 0) }, +	.ndiv	= CLKGEN_FIELD(0x0, 0x7, 15), +	.pe	= { CLKGEN_FIELD(0x8, 0x7fff, 0), +		    CLKGEN_FIELD(0x18, 0x7fff, 0), +		    CLKGEN_FIELD(0x28, 0x7fff, 0), +		    CLKGEN_FIELD(0x38, 0x7fff, 0) }, +	.sdiv	= { CLKGEN_FIELD(0xC, 0xf, 0), +		    CLKGEN_FIELD(0x1C, 0xf, 0), +		    CLKGEN_FIELD(0x2C, 0xf, 0), +		    CLKGEN_FIELD(0x3C, 0xf, 0) }, +	.lockstatus_present = true, +	.lock_status = CLKGEN_FIELD(0xAC, 0x1, 0), +	.pll_ops	= &st_quadfs_pll_c32_ops, +	.rtbl		= fs660c32_rtbl, +	.rtbl_cnt	= ARRAY_SIZE(fs660c32_rtbl), +	.get_rate	= clk_fs660c32_dig_get_rate, +}; + +static struct clkgen_quadfs_data st_fs660c32_F_416 = { +	.npda	= CLKGEN_FIELD(0x0, 0x1, 14), +	.nsb	= { CLKGEN_FIELD(0x0, 0x1, 10), +		    CLKGEN_FIELD(0x0, 0x1, 11), +		    CLKGEN_FIELD(0x0, 0x1, 12), +		    CLKGEN_FIELD(0x0, 0x1, 13) }, +	.nsdiv_present = true, +	.nsdiv	= { CLKGEN_FIELD(0x0, 0x1, 18), +		    CLKGEN_FIELD(0x0, 0x1, 19), +		    CLKGEN_FIELD(0x0, 0x1, 20), +		    CLKGEN_FIELD(0x0, 0x1, 21) }, +	.mdiv	= { CLKGEN_FIELD(0x4, 0x1f, 0), +		    CLKGEN_FIELD(0x14, 0x1f, 0), +		    CLKGEN_FIELD(0x24, 0x1f, 0), +		    CLKGEN_FIELD(0x34, 0x1f, 0) }, +	.en	= { CLKGEN_FIELD(0x10, 0x1, 0), +		    CLKGEN_FIELD(0x20, 0x1, 0), +		    CLKGEN_FIELD(0x30, 0x1, 0), +		    CLKGEN_FIELD(0x40, 0x1, 0) }, +	.ndiv	= CLKGEN_FIELD(0x0, 0x7, 15), +	.pe	= { CLKGEN_FIELD(0x8, 0x7fff, 0), +		    CLKGEN_FIELD(0x18, 0x7fff, 0), +		    CLKGEN_FIELD(0x28, 0x7fff, 0), +		    CLKGEN_FIELD(0x38, 0x7fff, 0) }, +	.sdiv	= { CLKGEN_FIELD(0xC, 0xf, 0), +		    CLKGEN_FIELD(0x1C, 0xf, 0), +		    CLKGEN_FIELD(0x2C, 0xf, 0), +		    CLKGEN_FIELD(0x3C, 0xf, 0) }, +	.lockstatus_present = true, +	.lock_status = CLKGEN_FIELD(0xEC, 0x1, 0), +	.pll_ops	= &st_quadfs_pll_c32_ops, +	.rtbl		= fs660c32_rtbl, +	.rtbl_cnt	= ARRAY_SIZE(fs660c32_rtbl), +	.get_rate	= 
clk_fs660c32_dig_get_rate, +}; + +/** + * DOC: A Frequency Synthesizer that multiples its input clock by a fixed factor + * + * Traits of this clock: + * prepare - clk_(un)prepare only ensures parent is (un)prepared + * enable - clk_enable and clk_disable are functional & control the Fsyn + * rate - inherits rate from parent. set_rate/round_rate/recalc_rate + * parent - fixed parent.  No clk_set_parent support + */ + +/** + * struct st_clk_quadfs_pll - A pll which outputs a fixed multiplier of + *                                  its parent clock, found inside a type of + *                                  ST quad channel frequency synthesizer block + * + * @hw: handle between common and hardware-specific interfaces. + * @ndiv: regmap field for the ndiv control. + * @regs_base: base address of the configuration registers. + * @lock: spinlock. + * + */ +struct st_clk_quadfs_pll { +	struct clk_hw	hw; +	void __iomem	*regs_base; +	spinlock_t	*lock; +	struct clkgen_quadfs_data *data; +	u32 ndiv; +}; + +#define to_quadfs_pll(_hw) container_of(_hw, struct st_clk_quadfs_pll, hw) + +static int quadfs_pll_enable(struct clk_hw *hw) +{ +	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw); +	unsigned long flags = 0, timeout = jiffies + msecs_to_jiffies(10); + +	if (pll->lock) +		spin_lock_irqsave(pll->lock, flags); + +	/* +	 * Bring block out of reset if we have reset control. 
+	 */ +	if (pll->data->reset_present) +		CLKGEN_WRITE(pll, nreset, 1); + +	/* +	 * Use a fixed input clock noise bandwidth filter for the moment +	 */ +	if (pll->data->bwfilter_present) +		CLKGEN_WRITE(pll, ref_bw, PLL_BW_GOODREF); + + +	CLKGEN_WRITE(pll, ndiv, pll->ndiv); + +	/* +	 * Power up the PLL +	 */ +	CLKGEN_WRITE(pll, npda, 1); + +	if (pll->lock) +		spin_unlock_irqrestore(pll->lock, flags); + +	if (pll->data->lockstatus_present) +		while (!CLKGEN_READ(pll, lock_status)) { +			if (time_after(jiffies, timeout)) +				return -ETIMEDOUT; +			cpu_relax(); +		} + +	return 0; +} + +static void quadfs_pll_disable(struct clk_hw *hw) +{ +	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw); +	unsigned long flags = 0; + +	if (pll->lock) +		spin_lock_irqsave(pll->lock, flags); + +	/* +	 * Powerdown the PLL and then put block into soft reset if we have +	 * reset control. +	 */ +	CLKGEN_WRITE(pll, npda, 0); + +	if (pll->data->reset_present) +		CLKGEN_WRITE(pll, nreset, 0); + +	if (pll->lock) +		spin_unlock_irqrestore(pll->lock, flags); +} + +static int quadfs_pll_is_enabled(struct clk_hw *hw) +{ +	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw); +	u32 npda = CLKGEN_READ(pll, npda); + +	return !!npda; +} + +int clk_fs660c32_vco_get_rate(unsigned long input, struct stm_fs *fs, +			   unsigned long *rate) +{ +	unsigned long nd = fs->ndiv + 16; /* ndiv value */ + +	*rate = input * nd; + +	return 0; +} + +static unsigned long quadfs_pll_fs660c32_recalc_rate(struct clk_hw *hw, +					unsigned long parent_rate) +{ +	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw); +	unsigned long rate = 0; +	struct stm_fs params; + +	params.ndiv = CLKGEN_READ(pll, ndiv); +	if (clk_fs660c32_vco_get_rate(parent_rate, ¶ms, &rate)) +		pr_err("%s:%s error calculating rate\n", +		       __clk_get_name(hw->clk), __func__); + +	pll->ndiv = params.ndiv; + +	return rate; +} + +int clk_fs660c32_vco_get_params(unsigned long input, +				unsigned long output, struct stm_fs *fs) +{ +/* Formula +   VCO 
frequency = (fin x ndiv) / pdiv +   ndiv = VCOfreq * pdiv / fin +   */ +	unsigned long pdiv = 1, n; + +	/* Output clock range: 384Mhz to 660Mhz */ +	if (output < 384000000 || output > 660000000) +		return -EINVAL; + +	if (input > 40000000) +		/* This means that PDIV would be 2 instead of 1. +		   Not supported today. */ +		return -EINVAL; + +	input /= 1000; +	output /= 1000; + +	n = output * pdiv / input; +	if (n < 16) +		n = 16; +	fs->ndiv = n - 16; /* Converting formula value to reg value */ + +	return 0; +} + +static long quadfs_pll_fs660c32_round_rate(struct clk_hw *hw, unsigned long rate +		, unsigned long *prate) +{ +	struct stm_fs params; + +	if (!clk_fs660c32_vco_get_params(*prate, rate, ¶ms)) +		clk_fs660c32_vco_get_rate(*prate, ¶ms, &rate); + +	pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n", +		 __func__, __clk_get_name(hw->clk), +		 rate, (unsigned int)params.sdiv, +		 (unsigned int)params.mdiv, +		 (unsigned int)params.pe, (unsigned int)params.nsdiv); + +	return rate; +} + +static int quadfs_pll_fs660c32_set_rate(struct clk_hw *hw, unsigned long rate, +				unsigned long parent_rate) +{ +	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw); +	struct stm_fs params; +	long hwrate = 0; +	unsigned long flags = 0; + +	if (!rate || !parent_rate) +		return -EINVAL; + +	if (!clk_fs660c32_vco_get_params(parent_rate, rate, ¶ms)) +		clk_fs660c32_vco_get_rate(parent_rate, ¶ms, &hwrate); + +	pr_debug("%s: %s new rate %ld [ndiv=0x%x]\n", +		 __func__, __clk_get_name(hw->clk), +		 hwrate, (unsigned int)params.ndiv); + +	if (!hwrate) +		return -EINVAL; + +	pll->ndiv = params.ndiv; + +	if (pll->lock) +		spin_lock_irqsave(pll->lock, flags); + +	CLKGEN_WRITE(pll, ndiv, pll->ndiv); + +	if (pll->lock) +		spin_unlock_irqrestore(pll->lock, flags); + +	return 0; +} + +static const struct clk_ops st_quadfs_pll_c65_ops = { +	.enable		= quadfs_pll_enable, +	.disable	= quadfs_pll_disable, +	.is_enabled	= quadfs_pll_is_enabled, +}; + +static const struct 
clk_ops st_quadfs_pll_c32_ops = { +	.enable		= quadfs_pll_enable, +	.disable	= quadfs_pll_disable, +	.is_enabled	= quadfs_pll_is_enabled, +	.recalc_rate	= quadfs_pll_fs660c32_recalc_rate, +	.round_rate	= quadfs_pll_fs660c32_round_rate, +	.set_rate	= quadfs_pll_fs660c32_set_rate, +}; + +static struct clk * __init st_clk_register_quadfs_pll( +		const char *name, const char *parent_name, +		struct clkgen_quadfs_data *quadfs, void __iomem *reg, +		spinlock_t *lock) +{ +	struct st_clk_quadfs_pll *pll; +	struct clk *clk; +	struct clk_init_data init; + +	/* +	 * Sanity check required pointers. +	 */ +	if (WARN_ON(!name || !parent_name)) +		return ERR_PTR(-EINVAL); + +	pll = kzalloc(sizeof(*pll), GFP_KERNEL); +	if (!pll) +		return ERR_PTR(-ENOMEM); + +	init.name = name; +	init.ops = quadfs->pll_ops; +	init.flags = CLK_IS_BASIC; +	init.parent_names = &parent_name; +	init.num_parents = 1; + +	pll->data = quadfs; +	pll->regs_base = reg; +	pll->lock = lock; +	pll->hw.init = &init; + +	clk = clk_register(NULL, &pll->hw); + +	if (IS_ERR(clk)) +		kfree(pll); + +	return clk; +} + +/** + * DOC: A digital frequency synthesizer + * + * Traits of this clock: + * prepare - clk_(un)prepare only ensures parent is (un)prepared + * enable - clk_enable and clk_disable are functional + * rate - set rate is functional + * parent - fixed parent.  No clk_set_parent support + */ + +/** + * struct st_clk_quadfs_fsynth - One clock output from a four channel digital + *                                  frequency synthesizer (fsynth) block. + * + * @hw: handle between common and hardware-specific interfaces + * + * @nsb: regmap field in the output control register for the digital + *       standby of this fsynth channel. This control is active low so + *       the channel is in standby when the control bit is cleared. + * + * @nsdiv: regmap field in the output control register for + *          for the optional divide by 3 of this fsynth channel. 
This control + *          is active low so the divide by 3 is active when the control bit is + *          cleared and the divide is bypassed when the bit is set. + */ +struct st_clk_quadfs_fsynth { +	struct clk_hw	hw; +	void __iomem	*regs_base; +	spinlock_t	*lock; +	struct clkgen_quadfs_data *data; + +	u32 chan; +	/* +	 * Cached hardware values from set_rate so we can program the +	 * hardware in enable. There are two reasons for this: +	 * +	 *  1. The registers may not be writable until the parent has been +	 *     enabled. +	 * +	 *  2. It restores the clock rate when a driver does an enable +	 *     on PM restore, after a suspend to RAM has lost the hardware +	 *     setup. +	 */ +	u32 md; +	u32 pe; +	u32 sdiv; +	u32 nsdiv; +}; + +#define to_quadfs_fsynth(_hw) \ +	container_of(_hw, struct st_clk_quadfs_fsynth, hw) + +static void quadfs_fsynth_program_enable(struct st_clk_quadfs_fsynth *fs) +{ +	/* +	 * Pulse the program enable register lsb to make the hardware take +	 * notice of the new md/pe values with a glitchless transition. +	 */ +	CLKGEN_WRITE(fs, en[fs->chan], 1); +	CLKGEN_WRITE(fs, en[fs->chan], 0); +} + +static void quadfs_fsynth_program_rate(struct st_clk_quadfs_fsynth *fs) +{ +	unsigned long flags = 0; + +	/* +	 * Ensure the md/pe parameters are ignored while we are +	 * reprogramming them so we can get a glitchless change +	 * when fine tuning the speed of a running clock. 
+	 */ +	CLKGEN_WRITE(fs, en[fs->chan], 0); + +	CLKGEN_WRITE(fs, mdiv[fs->chan], fs->md); +	CLKGEN_WRITE(fs, pe[fs->chan], fs->pe); +	CLKGEN_WRITE(fs, sdiv[fs->chan], fs->sdiv); + +	if (fs->lock) +		spin_lock_irqsave(fs->lock, flags); + +	if (fs->data->nsdiv_present) +		CLKGEN_WRITE(fs, nsdiv[fs->chan], fs->nsdiv); + +	if (fs->lock) +		spin_unlock_irqrestore(fs->lock, flags); +} + +static int quadfs_fsynth_enable(struct clk_hw *hw) +{ +	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw); +	unsigned long flags = 0; + +	pr_debug("%s: %s\n", __func__, __clk_get_name(hw->clk)); + +	quadfs_fsynth_program_rate(fs); + +	if (fs->lock) +		spin_lock_irqsave(fs->lock, flags); + +	CLKGEN_WRITE(fs, nsb[fs->chan], 1); + +	if (fs->lock) +		spin_unlock_irqrestore(fs->lock, flags); + +	quadfs_fsynth_program_enable(fs); + +	return 0; +} + +static void quadfs_fsynth_disable(struct clk_hw *hw) +{ +	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw); +	unsigned long flags = 0; + +	pr_debug("%s: %s\n", __func__, __clk_get_name(hw->clk)); + +	if (fs->lock) +		spin_lock_irqsave(fs->lock, flags); + +	CLKGEN_WRITE(fs, nsb[fs->chan], 0); + +	if (fs->lock) +		spin_unlock_irqrestore(fs->lock, flags); +} + +static int quadfs_fsynth_is_enabled(struct clk_hw *hw) +{ +	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw); +	u32 nsb = CLKGEN_READ(fs, nsb[fs->chan]); + +	pr_debug("%s: %s enable bit = 0x%x\n", +		 __func__, __clk_get_name(hw->clk), nsb); + +	return !!nsb; +} + +#define P15			(uint64_t)(1 << 15) + +static int clk_fs216c65_get_rate(unsigned long input, struct stm_fs *fs, +		unsigned long *rate) +{ +	uint64_t res; +	unsigned long ns; +	unsigned long nd = 8; /* ndiv stuck at 0 => val = 8 */ +	unsigned long s; +	long m; + +	m = fs->mdiv - 32; +	s = 1 << (fs->sdiv + 1); +	ns = (fs->nsdiv ? 
1 : 3); + +	res = (uint64_t)(s * ns * P15 * (uint64_t)(m + 33)); +	res = res - (s * ns * fs->pe); +	*rate = div64_u64(P15 * nd * input * 32, res); + +	return 0; +} + +static int clk_fs432c65_get_rate(unsigned long input, struct stm_fs *fs, +		unsigned long *rate) +{ +	uint64_t res; +	unsigned long nd = 16; /* ndiv value; stuck at 0 (30Mhz input) */ +	long m; +	unsigned long sd; +	unsigned long ns; + +	m = fs->mdiv - 32; +	sd = 1 << (fs->sdiv + 1); +	ns = (fs->nsdiv ? 1 : 3); + +	res = (uint64_t)(sd * ns * P15 * (uint64_t)(m + 33)); +	res = res - (sd * ns * fs->pe); +	*rate = div64_u64(P15 * nd * input * 32, res); + +	return 0; +} + +#define P20		(uint64_t)(1 << 20) + +static int clk_fs660c32_dig_get_rate(unsigned long input, +				struct stm_fs *fs, unsigned long *rate) +{ +	unsigned long s = (1 << fs->sdiv); +	unsigned long ns; +	uint64_t res; + +	/* +	 * 'nsdiv' is a register value ('BIN') which is translated +	 * to a decimal value according to following rules. +	 * +	 *     nsdiv      ns.dec +	 *       0        3 +	 *       1        1 +	 */ +	ns = (fs->nsdiv == 1) ? 1 : 3; + +	res = (P20 * (32 + fs->mdiv) + 32 * fs->pe) * s * ns; +	*rate = (unsigned long)div64_u64(input * P20 * 32, res); + +	return 0; +} + +static int quadfs_fsynt_get_hw_value_for_recalc(struct st_clk_quadfs_fsynth *fs, +		struct stm_fs *params) +{ +	/* +	 * Get the initial hardware values for recalc_rate +	 */ +	params->mdiv	= CLKGEN_READ(fs, mdiv[fs->chan]); +	params->pe	= CLKGEN_READ(fs, pe[fs->chan]); +	params->sdiv	= CLKGEN_READ(fs, sdiv[fs->chan]); + +	if (fs->data->nsdiv_present) +		params->nsdiv = CLKGEN_READ(fs, nsdiv[fs->chan]); +	else +		params->nsdiv = 1; + +	/* +	 * If All are NULL then assume no clock rate is programmed. 
+	 */ +	if (!params->mdiv && !params->pe && !params->sdiv) +		return 1; + +	fs->md = params->mdiv; +	fs->pe = params->pe; +	fs->sdiv = params->sdiv; +	fs->nsdiv = params->nsdiv; + +	return 0; +} + +static long quadfs_find_best_rate(struct clk_hw *hw, unsigned long drate, +				unsigned long prate, struct stm_fs *params) +{ +	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw); +	int (*clk_fs_get_rate)(unsigned long , +				struct stm_fs *, unsigned long *); +	struct stm_fs prev_params; +	unsigned long prev_rate, rate = 0; +	unsigned long diff_rate, prev_diff_rate = ~0; +	int index; + +	clk_fs_get_rate = fs->data->get_rate; + +	for (index = 0; index < fs->data->rtbl_cnt; index++) { +		prev_rate = rate; + +		*params = fs->data->rtbl[index]; +		prev_params = *params; + +		clk_fs_get_rate(prate, &fs->data->rtbl[index], &rate); + +		diff_rate = abs(drate - rate); + +		if (diff_rate > prev_diff_rate) { +			rate = prev_rate; +			*params = prev_params; +			break; +		} + +		prev_diff_rate = diff_rate; + +		if (drate == rate) +			return rate; +	} + + +	if (index == fs->data->rtbl_cnt) +		*params = prev_params; + +	return rate; +} + +static unsigned long quadfs_recalc_rate(struct clk_hw *hw, +		unsigned long parent_rate) +{ +	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw); +	unsigned long rate = 0; +	struct stm_fs params; +	int (*clk_fs_get_rate)(unsigned long , +				struct stm_fs *, unsigned long *); + +	clk_fs_get_rate = fs->data->get_rate; + +	if (quadfs_fsynt_get_hw_value_for_recalc(fs, ¶ms)) +		return 0; + +	if (clk_fs_get_rate(parent_rate, ¶ms, &rate)) { +		pr_err("%s:%s error calculating rate\n", +		       __clk_get_name(hw->clk), __func__); +	} + +	pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate); + +	return rate; +} + +static long quadfs_round_rate(struct clk_hw *hw, unsigned long rate, +				     unsigned long *prate) +{ +	struct stm_fs params; + +	rate = quadfs_find_best_rate(hw, rate, *prate, ¶ms); + +	pr_debug("%s: %s new rate %ld 
[sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n", +		 __func__, __clk_get_name(hw->clk), +		 rate, (unsigned int)params.sdiv, (unsigned int)params.mdiv, +			 (unsigned int)params.pe, (unsigned int)params.nsdiv); + +	return rate; +} + + +static void quadfs_program_and_enable(struct st_clk_quadfs_fsynth *fs, +		struct stm_fs *params) +{ +	fs->md = params->mdiv; +	fs->pe = params->pe; +	fs->sdiv = params->sdiv; +	fs->nsdiv = params->nsdiv; + +	/* +	 * In some integrations you can only change the fsynth programming when +	 * the parent entity containing it is enabled. +	 */ +	quadfs_fsynth_program_rate(fs); +	quadfs_fsynth_program_enable(fs); +} + +static int quadfs_set_rate(struct clk_hw *hw, unsigned long rate, +				  unsigned long parent_rate) +{ +	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw); +	struct stm_fs params; +	long hwrate; +	int uninitialized_var(i); + +	if (!rate || !parent_rate) +		return -EINVAL; + +	memset(¶ms, 0, sizeof(struct stm_fs)); + +	hwrate = quadfs_find_best_rate(hw, rate, parent_rate, ¶ms); +	if (!hwrate) +		return -EINVAL; + +	quadfs_program_and_enable(fs, ¶ms); + +	return 0; +} + + + +static const struct clk_ops st_quadfs_ops = { +	.enable		= quadfs_fsynth_enable, +	.disable	= quadfs_fsynth_disable, +	.is_enabled	= quadfs_fsynth_is_enabled, +	.round_rate	= quadfs_round_rate, +	.set_rate	= quadfs_set_rate, +	.recalc_rate	= quadfs_recalc_rate, +}; + +static struct clk * __init st_clk_register_quadfs_fsynth( +		const char *name, const char *parent_name, +		struct clkgen_quadfs_data *quadfs, void __iomem *reg, u32 chan, +		spinlock_t *lock) +{ +	struct st_clk_quadfs_fsynth *fs; +	struct clk *clk; +	struct clk_init_data init; + +	/* +	 * Sanity check required pointers, note that nsdiv3 is optional. 
+	 */ +	if (WARN_ON(!name || !parent_name)) +		return ERR_PTR(-EINVAL); + +	fs = kzalloc(sizeof(*fs), GFP_KERNEL); +	if (!fs) +		return ERR_PTR(-ENOMEM); + +	init.name = name; +	init.ops = &st_quadfs_ops; +	init.flags = CLK_GET_RATE_NOCACHE | CLK_IS_BASIC; +	init.parent_names = &parent_name; +	init.num_parents = 1; + +	fs->data = quadfs; +	fs->regs_base = reg; +	fs->chan = chan; +	fs->lock = lock; +	fs->hw.init = &init; + +	clk = clk_register(NULL, &fs->hw); + +	if (IS_ERR(clk)) +		kfree(fs); + +	return clk; +} + +static struct of_device_id quadfs_of_match[] = { +	{ +		.compatible = "st,stih416-quadfs216", +		.data = (void *)&st_fs216c65_416 +	}, +	{ +		.compatible = "st,stih416-quadfs432", +		.data = (void *)&st_fs432c65_416 +	}, +	{ +		.compatible = "st,stih416-quadfs660-E", +		.data = (void *)&st_fs660c32_E_416 +	}, +	{ +		.compatible = "st,stih416-quadfs660-F", +		.data = (void *)&st_fs660c32_F_416 +	}, +	{} +}; + +static void __init st_of_create_quadfs_fsynths( +		struct device_node *np, const char *pll_name, +		struct clkgen_quadfs_data *quadfs, void __iomem *reg, +		spinlock_t *lock) +{ +	struct clk_onecell_data *clk_data; +	int fschan; + +	clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); +	if (!clk_data) +		return; + +	clk_data->clk_num = QUADFS_MAX_CHAN; +	clk_data->clks = kzalloc(QUADFS_MAX_CHAN * sizeof(struct clk *), +				 GFP_KERNEL); + +	if (!clk_data->clks) { +		kfree(clk_data); +		return; +	} + +	for (fschan = 0; fschan < QUADFS_MAX_CHAN; fschan++) { +		struct clk *clk; +		const char *clk_name; + +		if (of_property_read_string_index(np, "clock-output-names", +						  fschan, &clk_name)) { +			break; +		} + +		/* +		 * If we read an empty clock name then the channel is unused +		 */ +		if (*clk_name == '\0') +			continue; + +		clk = st_clk_register_quadfs_fsynth(clk_name, pll_name, +				quadfs, reg, fschan, lock); + +		/* +		 * If there was an error registering this clock output, clean +		 * up and move on to the next one. 
+		 */ +		if (!IS_ERR(clk)) { +			clk_data->clks[fschan] = clk; +			pr_debug("%s: parent %s rate %u\n", +				__clk_get_name(clk), +				__clk_get_name(clk_get_parent(clk)), +				(unsigned int)clk_get_rate(clk)); +		} +	} + +	of_clk_add_provider(np, of_clk_src_onecell_get, clk_data); +} + +static void __init st_of_quadfs_setup(struct device_node *np) +{ +	const struct of_device_id *match; +	struct clk *clk; +	const char *pll_name, *clk_parent_name; +	void __iomem *reg; +	spinlock_t *lock; + +	match = of_match_node(quadfs_of_match, np); +	if (WARN_ON(!match)) +		return; + +	reg = of_iomap(np, 0); +	if (!reg) +		return; + +	clk_parent_name = of_clk_get_parent_name(np, 0); +	if (!clk_parent_name) +		return; + +	pll_name = kasprintf(GFP_KERNEL, "%s.pll", np->name); +	if (!pll_name) +		return; + +	lock = kzalloc(sizeof(*lock), GFP_KERNEL); +	if (!lock) +		goto err_exit; + +	spin_lock_init(lock); + +	clk = st_clk_register_quadfs_pll(pll_name, clk_parent_name, +			(struct clkgen_quadfs_data *) match->data, reg, lock); +	if (IS_ERR(clk)) +		goto err_exit; +	else +		pr_debug("%s: parent %s rate %u\n", +			__clk_get_name(clk), +			__clk_get_name(clk_get_parent(clk)), +			(unsigned int)clk_get_rate(clk)); + +	st_of_create_quadfs_fsynths(np, pll_name, +				    (struct clkgen_quadfs_data *)match->data, +				    reg, lock); + +err_exit: +	kfree(pll_name); /* No longer need local copy of the PLL name */ +} +CLK_OF_DECLARE(quadfs, "st,quadfs", st_of_quadfs_setup); diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c new file mode 100644 index 00000000000..a329906d1e8 --- /dev/null +++ b/drivers/clk/st/clkgen-mux.c @@ -0,0 +1,820 @@ +/* + * clkgen-mux.c: ST GEN-MUX Clock driver + * + * Copyright (C) 2014 STMicroelectronics (R&D) Limited + * + * Authors: Stephen Gallimore <stephen.gallimore@st.com> + *	    Pankaj Dev <pankaj.dev@st.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public 
License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#include <linux/slab.h> +#include <linux/of_address.h> +#include <linux/clk-provider.h> + +static DEFINE_SPINLOCK(clkgena_divmux_lock); +static DEFINE_SPINLOCK(clkgenf_lock); + +static const char ** __init clkgen_mux_get_parents(struct device_node *np, +						       int *num_parents) +{ +	const char **parents; +	int nparents, i; + +	nparents = of_count_phandle_with_args(np, "clocks", "#clock-cells"); +	if (WARN_ON(nparents <= 0)) +		return ERR_PTR(-EINVAL); + +	parents = kzalloc(nparents * sizeof(const char *), GFP_KERNEL); +	if (!parents) +		return ERR_PTR(-ENOMEM); + +	for (i = 0; i < nparents; i++) +		parents[i] = of_clk_get_parent_name(np, i); + +	*num_parents = nparents; +	return parents; +} + +/** + * DOC: Clock mux with a programmable divider on each of its three inputs. + *      The mux has an input setting which effectively gates its output. 
+ * + * Traits of this clock: + * prepare - clk_(un)prepare only ensures parent is (un)prepared + * enable - clk_enable and clk_disable are functional & control gating + * rate - set rate is supported + * parent - set/get parent + */ + +#define NUM_INPUTS 3 + +struct clkgena_divmux { +	struct clk_hw hw; +	/* Subclassed mux and divider structures */ +	struct clk_mux mux; +	struct clk_divider div[NUM_INPUTS]; +	/* Enable/running feedback register bits for each input */ +	void __iomem *feedback_reg[NUM_INPUTS]; +	int feedback_bit_idx; + +	u8              muxsel; +}; + +#define to_clkgena_divmux(_hw) container_of(_hw, struct clkgena_divmux, hw) + +struct clkgena_divmux_data { +	int num_outputs; +	int mux_offset; +	int mux_offset2; +	int mux_start_bit; +	int div_offsets[NUM_INPUTS]; +	int fb_offsets[NUM_INPUTS]; +	int fb_start_bit_idx; +}; + +#define CKGAX_CLKOPSRC_SWITCH_OFF 0x3 + +static int clkgena_divmux_is_running(struct clkgena_divmux *mux) +{ +	u32 regval = readl(mux->feedback_reg[mux->muxsel]); +	u32 running = regval & BIT(mux->feedback_bit_idx); +	return !!running; +} + +static int clkgena_divmux_enable(struct clk_hw *hw) +{ +	struct clkgena_divmux *genamux = to_clkgena_divmux(hw); +	struct clk_hw *mux_hw = &genamux->mux.hw; +	unsigned long timeout; +	int ret = 0; + +	mux_hw->clk = hw->clk; + +	ret = clk_mux_ops.set_parent(mux_hw, genamux->muxsel); +	if (ret) +		return ret; + +	timeout = jiffies + msecs_to_jiffies(10); + +	while (!clkgena_divmux_is_running(genamux)) { +		if (time_after(jiffies, timeout)) +			return -ETIMEDOUT; +		cpu_relax(); +	} + +	return 0; +} + +static void clkgena_divmux_disable(struct clk_hw *hw) +{ +	struct clkgena_divmux *genamux = to_clkgena_divmux(hw); +	struct clk_hw *mux_hw = &genamux->mux.hw; + +	mux_hw->clk = hw->clk; + +	clk_mux_ops.set_parent(mux_hw, CKGAX_CLKOPSRC_SWITCH_OFF); +} + +static int clkgena_divmux_is_enabled(struct clk_hw *hw) +{ +	struct clkgena_divmux *genamux = to_clkgena_divmux(hw); +	struct clk_hw *mux_hw = 
&genamux->mux.hw; + +	mux_hw->clk = hw->clk; + +	return (s8)clk_mux_ops.get_parent(mux_hw) > 0; +} + +u8 clkgena_divmux_get_parent(struct clk_hw *hw) +{ +	struct clkgena_divmux *genamux = to_clkgena_divmux(hw); +	struct clk_hw *mux_hw = &genamux->mux.hw; + +	mux_hw->clk = hw->clk; + +	genamux->muxsel = clk_mux_ops.get_parent(mux_hw); +	if ((s8)genamux->muxsel < 0) { +		pr_debug("%s: %s: Invalid parent, setting to default.\n", +		      __func__, __clk_get_name(hw->clk)); +		genamux->muxsel = 0; +	} + +	return genamux->muxsel; +} + +static int clkgena_divmux_set_parent(struct clk_hw *hw, u8 index) +{ +	struct clkgena_divmux *genamux = to_clkgena_divmux(hw); + +	if (index >= CKGAX_CLKOPSRC_SWITCH_OFF) +		return -EINVAL; + +	genamux->muxsel = index; + +	/* +	 * If the mux is already enabled, call enable directly to set the +	 * new mux position and wait for it to start running again. Otherwise +	 * do nothing. +	 */ +	if (clkgena_divmux_is_enabled(hw)) +		clkgena_divmux_enable(hw); + +	return 0; +} + +unsigned long clkgena_divmux_recalc_rate(struct clk_hw *hw, +		unsigned long parent_rate) +{ +	struct clkgena_divmux *genamux = to_clkgena_divmux(hw); +	struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw; + +	div_hw->clk = hw->clk; + +	return clk_divider_ops.recalc_rate(div_hw, parent_rate); +} + +static int clkgena_divmux_set_rate(struct clk_hw *hw, unsigned long rate, +				unsigned long parent_rate) +{ +	struct clkgena_divmux *genamux = to_clkgena_divmux(hw); +	struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw; + +	div_hw->clk = hw->clk; + +	return clk_divider_ops.set_rate(div_hw, rate, parent_rate); +} + +static long clkgena_divmux_round_rate(struct clk_hw *hw, unsigned long rate, +				   unsigned long *prate) +{ +	struct clkgena_divmux *genamux = to_clkgena_divmux(hw); +	struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw; + +	div_hw->clk = hw->clk; + +	return clk_divider_ops.round_rate(div_hw, rate, prate); +} + +static const struct clk_ops 
clkgena_divmux_ops = {
	.enable = clkgena_divmux_enable,
	.disable = clkgena_divmux_disable,
	.is_enabled = clkgena_divmux_is_enabled,
	.get_parent = clkgena_divmux_get_parent,
	.set_parent = clkgena_divmux_set_parent,
	.round_rate = clkgena_divmux_round_rate,
	.recalc_rate = clkgena_divmux_recalc_rate,
	.set_rate = clkgena_divmux_set_rate,
};

/**
 * clk_register_genamux - register a genamux clock with the clock framework
 *
 * Returns the registered clk, or an ERR_PTR() on failure; the allocated
 * clkgena_divmux is owned by the clock framework on success.
 */
struct clk *clk_register_genamux(const char *name,
				const char **parent_names, u8 num_parents,
				void __iomem *reg,
				const struct clkgena_divmux_data *muxdata,
				u32 idx)
{
	/*
	 * Fixed constants across all ClockgenA variants
	 */
	const int mux_width = 2;
	const int divider_width = 5;
	struct clkgena_divmux *genamux;
	struct clk *clk;
	struct clk_init_data init;
	int i;

	genamux = kzalloc(sizeof(*genamux), GFP_KERNEL);
	if (!genamux)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &clkgena_divmux_ops;
	init.flags = CLK_IS_BASIC;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	genamux->mux.lock  = &clkgena_divmux_lock;
	genamux->mux.mask = BIT(mux_width) - 1;
	genamux->mux.shift = muxdata->mux_start_bit + (idx * mux_width);
	if (genamux->mux.shift > 31) {
		/*
		 * We have spilled into the second mux register so
		 * adjust the register address and the bit shift accordingly
		 */
		genamux->mux.reg = reg + muxdata->mux_offset2;
		genamux->mux.shift -= 32;
	} else {
		genamux->mux.reg   = reg + muxdata->mux_offset;
	}

	for (i = 0; i < NUM_INPUTS; i++) {
		/*
		 * Divider config for each input
		 */
		void __iomem *divbase = reg + muxdata->div_offsets[i];
		genamux->div[i].width = divider_width;
		genamux->div[i].reg = divbase + (idx * sizeof(u32));

		/*
		 * Mux enabled/running feedback register for each input.
		 */
		genamux->feedback_reg[i] = reg + muxdata->fb_offsets[i];
	}

	genamux->feedback_bit_idx = muxdata->fb_start_bit_idx + idx;
	genamux->hw.init = &init;

	clk = clk_register(NULL, &genamux->hw);
	if (IS_ERR(clk)) {
		kfree(genamux);
		goto err;
	}

	pr_debug("%s: parent %s rate %lu\n",
			__clk_get_name(clk),
			__clk_get_name(clk_get_parent(clk)),
			clk_get_rate(clk));
err:
	return clk;
}

/* Per-variant register layouts for the ClockgenA divmux instances */
static struct clkgena_divmux_data st_divmux_c65hs = {
	.num_outputs = 4,
	.mux_offset = 0x14,
	.mux_start_bit = 0,
	.div_offsets = { 0x800, 0x900, 0xb00 },
	.fb_offsets = { 0x18, 0x1c, 0x20 },
	.fb_start_bit_idx = 0,
};

static struct clkgena_divmux_data st_divmux_c65ls = {
	.num_outputs = 14,
	.mux_offset = 0x14,
	.mux_offset2 = 0x24,
	.mux_start_bit = 8,
	.div_offsets = { 0x810, 0xa10, 0xb10 },
	.fb_offsets = { 0x18, 0x1c, 0x20 },
	.fb_start_bit_idx = 4,
};

static struct clkgena_divmux_data st_divmux_c32odf0 = {
	.num_outputs = 8,
	.mux_offset = 0x1c,
	.mux_start_bit = 0,
	.div_offsets = { 0x800, 0x900, 0xa60 },
	.fb_offsets = { 0x2c, 0x24, 0x28 },
	.fb_start_bit_idx = 0,
};

static struct clkgena_divmux_data st_divmux_c32odf1 = {
	.num_outputs = 8,
	.mux_offset = 0x1c,
	.mux_start_bit = 16,
	.div_offsets = { 0x820, 0x980, 0xa80 },
	.fb_offsets = { 0x2c, 0x24, 0x28 },
	.fb_start_bit_idx = 8,
};

static struct clkgena_divmux_data st_divmux_c32odf2 = {
	.num_outputs = 8,
	.mux_offset = 0x20,
	.mux_start_bit = 0,
	.div_offsets = { 0x840, 0xa20, 0xb10 },
	.fb_offsets = { 0x2c, 0x24, 0x28 },
	.fb_start_bit_idx = 16,
};

static struct clkgena_divmux_data st_divmux_c32odf3 = {
	.num_outputs = 8,
	.mux_offset = 0x20,
	.mux_start_bit = 16,
	.div_offsets = { 0x860, 0xa40, 0xb30 },
	.fb_offsets = { 0x2c, 0x24, 0x28 },
	.fb_start_bit_idx = 24,
};

static struct of_device_id clkgena_divmux_of_match[] = {
	{
		.compatible = "st,clkgena-divmux-c65-hs",
		.data = &st_divmux_c65hs,
	},
	{
		.compatible = "st,clkgena-divmux-c65-ls",
		.data = &st_divmux_c65ls,
	},
	{
		.compatible = "st,clkgena-divmux-c32-odf0",
		.data = &st_divmux_c32odf0,
	},
	{
		.compatible = "st,clkgena-divmux-c32-odf1",
		.data = &st_divmux_c32odf1,
	},
	{
		.compatible = "st,clkgena-divmux-c32-odf2",
		.data = &st_divmux_c32odf2,
	},
	{
		.compatible = "st,clkgena-divmux-c32-odf3",
		.data = &st_divmux_c32odf3,
	},
	{}
};

/*
 * Map the registers of the PARENT node: these clocks share the register
 * bank of the enclosing clockgen, not a reg property of their own.
 */
static void __iomem * __init clkgen_get_register_base(
				struct device_node *np)
{
	struct device_node *pnode;
	void __iomem *reg = NULL;

	pnode = of_get_parent(np);
	if (!pnode)
		return NULL;

	reg = of_iomap(pnode, 0);

	of_node_put(pnode);
	return reg;
}

/* Register one divmux clock per "clock-output-names" entry of @np */
void __init st_of_clkgena_divmux_setup(struct device_node *np)
{
	const struct of_device_id *match;
	const struct clkgena_divmux_data *data;
	struct clk_onecell_data *clk_data;
	void __iomem *reg;
	const char **parents;
	int num_parents = 0, i;

	match = of_match_node(clkgena_divmux_of_match, np);
	if (WARN_ON(!match))
		return;

	data = (struct clkgena_divmux_data *)match->data;

	reg = clkgen_get_register_base(np);
	if (!reg)
		return;

	parents = clkgen_mux_get_parents(np, &num_parents);
	if (IS_ERR(parents))
		return;

	clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
	if (!clk_data)
		goto err;

	clk_data->clk_num = data->num_outputs;
	clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *),
				 GFP_KERNEL);

	if (!clk_data->clks)
		goto err;

	for (i = 0; i < clk_data->clk_num; i++) {
		struct clk *clk;
		const char *clk_name;

		if (of_property_read_string_index(np, "clock-output-names",
						  i, &clk_name))
			break;

		/*
		 * If we read an empty clock name then the output is unused
		 */
		if (*clk_name == '\0')
			continue;

		clk = clk_register_genamux(clk_name, parents, num_parents,
					   reg, data, i);

		if (IS_ERR(clk))
			goto err;

		clk_data->clks[i] = clk;
	}

	kfree(parents);

	of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
	return;
err:
	/*
	 * NOTE(review): clocks already registered in the loop above are not
	 * unregistered and reg is not unmapped here — confirm whether that
	 * leak is acceptable for this one-shot __init path.
	 */
	if (clk_data)
		kfree(clk_data->clks);

	kfree(clk_data);
	kfree(parents);
}
CLK_OF_DECLARE(clkgenadivmux, "st,clkgena-divmux", st_of_clkgena_divmux_setup);

struct clkgena_prediv_data {
	u32 offset;
	u8 shift;
	struct clk_div_table *table;
};

/* The prediv is a single bit selecting divide-by-1 or divide-by-16 */
static struct clk_div_table prediv_table16[] = {
	{ .val = 0, .div = 1 },
	{ .val = 1, .div = 16 },
	{ .div = 0 },
};

static struct clkgena_prediv_data prediv_c65_data = {
	.offset = 0x4c,
	.shift = 31,
	.table = prediv_table16,
};

static struct clkgena_prediv_data prediv_c32_data = {
	.offset = 0x50,
	.shift = 1,
	.table = prediv_table16,
};

static struct of_device_id clkgena_prediv_of_match[] = {
	{ .compatible = "st,clkgena-prediv-c65", .data = &prediv_c65_data },
	{ .compatible = "st,clkgena-prediv-c32", .data = &prediv_c32_data },
	{}
};

/* Register the single table-based pre-divider clock described by @np */
void __init st_of_clkgena_prediv_setup(struct device_node *np)
{
	const struct of_device_id *match;
	void __iomem *reg;
	const char *parent_name, *clk_name;
	struct clk *clk;
	struct clkgena_prediv_data *data;

	match = of_match_node(clkgena_prediv_of_match, np);
	if (!match) {
		pr_err("%s: No matching data\n", __func__);
		return;
	}

	data = (struct clkgena_prediv_data *)match->data;

	reg = clkgen_get_register_base(np);
	if (!reg)
		return;

	parent_name = of_clk_get_parent_name(np, 0);
	if (!parent_name)
		return;

	if (of_property_read_string_index(np, "clock-output-names",
					  0, &clk_name))
		return;

	clk = clk_register_divider_table(NULL, clk_name, parent_name, 0,
					 reg + data->offset, data->shift, 1,
					 0, data->table, NULL);
	if (IS_ERR(clk))
		return;

	of_clk_add_provider(np, of_clk_src_simple_get, clk);
	pr_debug("%s: parent %s rate %u\n",
		__clk_get_name(clk),
		__clk_get_name(clk_get_parent(clk)),
		(unsigned int)clk_get_rate(clk));

	return;
}
CLK_OF_DECLARE(clkgenaprediv, "st,clkgena-prediv", st_of_clkgena_prediv_setup);

struct clkgen_mux_data {
	u32 offset;
	u8 shift;
	u8 width;
	spinlock_t *lock;
	unsigned long clk_flags;
	u8 mux_flags;
};

/* Per-instance mux field positions for the STiH415/416 clockgen muxes */
static struct clkgen_mux_data clkgen_mux_c_vcc_hd_416 = {
	.offset = 0,
	.shift = 0,
	.width = 1,
};

static struct clkgen_mux_data clkgen_mux_f_vcc_fvdp_416 = {
	.offset = 0,
	.shift = 0,
	.width = 1,
};

static struct clkgen_mux_data clkgen_mux_f_vcc_hva_416 = {
	.offset = 0,
	.shift = 0,
	.width = 1,
};

static struct clkgen_mux_data clkgen_mux_f_vcc_hd_416 = {
	.offset = 0,
	.shift = 16,
	.width = 1,
	.lock = &clkgenf_lock,
};

static struct clkgen_mux_data clkgen_mux_c_vcc_sd_416 = {
	.offset = 0,
	.shift = 17,
	.width = 1,
	.lock = &clkgenf_lock,
};

static struct clkgen_mux_data stih415_a9_mux_data = {
	.offset = 0,
	.shift = 1,
	.width = 2,
};
static struct clkgen_mux_data stih416_a9_mux_data = {
	.offset = 0,
	.shift = 0,
	.width = 2,
};

static struct of_device_id mux_of_match[] = {
	{
		.compatible = "st,stih416-clkgenc-vcc-hd",
		.data = &clkgen_mux_c_vcc_hd_416,
	},
	{
		.compatible = "st,stih416-clkgenf-vcc-fvdp",
		.data = &clkgen_mux_f_vcc_fvdp_416,
	},
	{
		.compatible = "st,stih416-clkgenf-vcc-hva",
		.data = &clkgen_mux_f_vcc_hva_416,
	},
	{
		.compatible = "st,stih416-clkgenf-vcc-hd",
		.data = &clkgen_mux_f_vcc_hd_416,
	},
	{
		.compatible = "st,stih416-clkgenf-vcc-sd",
		.data = &clkgen_mux_c_vcc_sd_416,
	},
	{
		.compatible = "st,stih415-clkgen-a9-mux",
		.data = &stih415_a9_mux_data,
	},
	{
		.compatible = "st,stih416-clkgen-a9-mux",
		.data = &stih416_a9_mux_data,
	},
	{}
};

/* Register a plain clk_mux for @np; the clock is named after the node */
void __init st_of_clkgen_mux_setup(struct device_node *np)
{
	const struct of_device_id *match;
	struct clk *clk;
	void __iomem *reg;
	const char **parents;
	int num_parents;
	struct clkgen_mux_data *data;

	match = of_match_node(mux_of_match, np);
	if (!match) {
		pr_err("%s: No matching data\n", __func__);
		return;
	}
+ +	data = (struct clkgen_mux_data *)match->data; + +	reg = of_iomap(np, 0); +	if (!reg) { +		pr_err("%s: Failed to get base address\n", __func__); +		return; +	} + +	parents = clkgen_mux_get_parents(np, &num_parents); +	if (IS_ERR(parents)) { +		pr_err("%s: Failed to get parents (%ld)\n", +				__func__, PTR_ERR(parents)); +		return; +	} + +	clk = clk_register_mux(NULL, np->name, parents, num_parents, +				data->clk_flags | CLK_SET_RATE_PARENT, +				reg + data->offset, +				data->shift, data->width, data->mux_flags, +				data->lock); +	if (IS_ERR(clk)) +		goto err; + +	pr_debug("%s: parent %s rate %u\n", +			__clk_get_name(clk), +			__clk_get_name(clk_get_parent(clk)), +			(unsigned int)clk_get_rate(clk)); + +	of_clk_add_provider(np, of_clk_src_simple_get, clk); + +err: +	kfree(parents); + +	return; +} +CLK_OF_DECLARE(clkgen_mux, "st,clkgen-mux", st_of_clkgen_mux_setup); + +#define VCC_MAX_CHANNELS 16 + +#define VCC_GATE_OFFSET 0x0 +#define VCC_MUX_OFFSET 0x4 +#define VCC_DIV_OFFSET 0x8 + +struct clkgen_vcc_data { +	spinlock_t *lock; +	unsigned long clk_flags; +}; + +static struct clkgen_vcc_data st_clkgenc_vcc_416 = { +	.clk_flags = CLK_SET_RATE_PARENT, +}; + +static struct clkgen_vcc_data st_clkgenf_vcc_416 = { +	.lock = &clkgenf_lock, +}; + +static struct of_device_id vcc_of_match[] = { +	{ .compatible = "st,stih416-clkgenc", .data = &st_clkgenc_vcc_416 }, +	{ .compatible = "st,stih416-clkgenf", .data = &st_clkgenf_vcc_416 }, +	{} +}; + +void __init st_of_clkgen_vcc_setup(struct device_node *np) +{ +	const struct of_device_id *match; +	void __iomem *reg; +	const char **parents; +	int num_parents, i; +	struct clk_onecell_data *clk_data; +	struct clkgen_vcc_data *data; + +	match = of_match_node(vcc_of_match, np); +	if (WARN_ON(!match)) +		return; +	data = (struct clkgen_vcc_data *)match->data; + +	reg = of_iomap(np, 0); +	if (!reg) +		return; + +	parents = clkgen_mux_get_parents(np, &num_parents); +	if (IS_ERR(parents)) +		return; + +	clk_data = 
kzalloc(sizeof(*clk_data), GFP_KERNEL); +	if (!clk_data) +		goto err; + +	clk_data->clk_num = VCC_MAX_CHANNELS; +	clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *), +				 GFP_KERNEL); + +	if (!clk_data->clks) +		goto err; + +	for (i = 0; i < clk_data->clk_num; i++) { +		struct clk *clk; +		const char *clk_name; +		struct clk_gate *gate; +		struct clk_divider *div; +		struct clk_mux *mux; + +		if (of_property_read_string_index(np, "clock-output-names", +						  i, &clk_name)) +			break; + +		/* +		 * If we read an empty clock name then the output is unused +		 */ +		if (*clk_name == '\0') +			continue; + +		gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL); +		if (!gate) +			break; + +		div = kzalloc(sizeof(struct clk_divider), GFP_KERNEL); +		if (!div) { +			kfree(gate); +			break; +		} + +		mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL); +		if (!mux) { +			kfree(gate); +			kfree(div); +			break; +		} + +		gate->reg = reg + VCC_GATE_OFFSET; +		gate->bit_idx = i; +		gate->flags = CLK_GATE_SET_TO_DISABLE; +		gate->lock = data->lock; + +		div->reg = reg + VCC_DIV_OFFSET; +		div->shift = 2 * i; +		div->width = 2; +		div->flags = CLK_DIVIDER_POWER_OF_TWO; + +		mux->reg = reg + VCC_MUX_OFFSET; +		mux->shift = 2 * i; +		mux->mask = 0x3; + +		clk = clk_register_composite(NULL, clk_name, parents, +					     num_parents, +					     &mux->hw, &clk_mux_ops, +					     &div->hw, &clk_divider_ops, +					     &gate->hw, &clk_gate_ops, +					     data->clk_flags); +		if (IS_ERR(clk)) { +			kfree(gate); +			kfree(div); +			kfree(mux); +			goto err; +		} + +		pr_debug("%s: parent %s rate %u\n", +			__clk_get_name(clk), +			__clk_get_name(clk_get_parent(clk)), +			(unsigned int)clk_get_rate(clk)); + +		clk_data->clks[i] = clk; +	} + +	kfree(parents); + +	of_clk_add_provider(np, of_clk_src_onecell_get, clk_data); +	return; + +err: +	for (i = 0; i < clk_data->clk_num; i++) { +		struct clk_composite *composite; + +		if (!clk_data->clks[i]) +			continue; + +		
composite = container_of(__clk_get_hw(clk_data->clks[i]), +					 struct clk_composite, hw); +		kfree(container_of(composite->gate_hw, struct clk_gate, hw)); +		kfree(container_of(composite->rate_hw, struct clk_divider, hw)); +		kfree(container_of(composite->mux_hw, struct clk_mux, hw)); +	} + +	if (clk_data) +		kfree(clk_data->clks); + +	kfree(clk_data); +	kfree(parents); +} +CLK_OF_DECLARE(clkgen_vcc, "st,clkgen-vcc", st_of_clkgen_vcc_setup); diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c new file mode 100644 index 00000000000..d8b9b1a2aed --- /dev/null +++ b/drivers/clk/st/clkgen-pll.c @@ -0,0 +1,701 @@ +/* + * Copyright (C) 2014 STMicroelectronics (R&D) Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +/* + * Authors: + * Stephen Gallimore <stephen.gallimore@st.com>, + * Pankaj Dev <pankaj.dev@st.com>. 
 */

#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/clk-provider.h>

#include "clkgen.h"

static DEFINE_SPINLOCK(clkgena_c32_odf_lock);

/*
 * Common PLL configuration register bits for PLL800 and PLL1600 C65
 */
#define C65_MDIV_PLL800_MASK	(0xff)
#define C65_MDIV_PLL1600_MASK	(0x7)
#define C65_NDIV_MASK		(0xff)
#define C65_PDIV_MASK		(0x7)

/*
 * PLL configuration register bits for PLL3200 C32
 */
#define C32_NDIV_MASK (0xff)
#define C32_IDF_MASK (0x7)
#define C32_ODF_MASK (0x3f)
#define C32_LDF_MASK (0x7f)

#define C32_MAX_ODFS (4)

/*
 * Per-PLL-type description of where each divider/status field lives in the
 * register bank, plus the clk_ops implementing that PLL type.
 */
struct clkgen_pll_data {
	struct clkgen_field pdn_status;
	struct clkgen_field locked_status;
	struct clkgen_field mdiv;
	struct clkgen_field ndiv;
	struct clkgen_field pdiv;
	struct clkgen_field idf;
	struct clkgen_field ldf;
	unsigned int num_odfs;
	struct clkgen_field odf[C32_MAX_ODFS];
	struct clkgen_field odf_gate[C32_MAX_ODFS];
	const struct clk_ops *ops;
};

static const struct clk_ops st_pll1600c65_ops;
static const struct clk_ops st_pll800c65_ops;
static const struct clk_ops stm_pll3200c32_ops;
static const struct clk_ops st_pll1200c32_ops;

static struct clkgen_pll_data st_pll1600c65_ax = {
	.pdn_status	= CLKGEN_FIELD(0x0, 0x1,			19),
	.locked_status	= CLKGEN_FIELD(0x0, 0x1,			31),
	.mdiv		= CLKGEN_FIELD(0x0, C65_MDIV_PLL1600_MASK,	0),
	.ndiv		= CLKGEN_FIELD(0x0, C65_NDIV_MASK,		8),
	.ops		= &st_pll1600c65_ops
};

static struct clkgen_pll_data st_pll800c65_ax = {
	.pdn_status	= CLKGEN_FIELD(0x0,	0x1,			19),
	.locked_status	= CLKGEN_FIELD(0x0,	0x1,			31),
	.mdiv		= CLKGEN_FIELD(0x0,	C65_MDIV_PLL800_MASK,	0),
	.ndiv		= CLKGEN_FIELD(0x0,	C65_NDIV_MASK,		8),
	.pdiv		= CLKGEN_FIELD(0x0,	C65_PDIV_MASK,		16),
	.ops		= &st_pll800c65_ops
};

static struct clkgen_pll_data st_pll3200c32_a1x_0 = {
	.pdn_status	= CLKGEN_FIELD(0x0,	0x1,			31),
	.locked_status	= CLKGEN_FIELD(0x4,	0x1,			31),
	.ndiv		= CLKGEN_FIELD(0x0,	C32_NDIV_MASK,		0x0),
	.idf		= CLKGEN_FIELD(0x4,	C32_IDF_MASK,		0x0),
	.num_odfs = 4,
	.odf =	{	CLKGEN_FIELD(0x54,	C32_ODF_MASK,		4),
			CLKGEN_FIELD(0x54,	C32_ODF_MASK,		10),
			CLKGEN_FIELD(0x54,	C32_ODF_MASK,		16),
			CLKGEN_FIELD(0x54,	C32_ODF_MASK,		22) },
	.odf_gate = {	CLKGEN_FIELD(0x54,	0x1,			0),
			CLKGEN_FIELD(0x54,	0x1,			1),
			CLKGEN_FIELD(0x54,	0x1,			2),
			CLKGEN_FIELD(0x54,	0x1,			3) },
	.ops		= &stm_pll3200c32_ops,
};

static struct clkgen_pll_data st_pll3200c32_a1x_1 = {
	.pdn_status	= CLKGEN_FIELD(0xC,	0x1,			31),
	.locked_status	= CLKGEN_FIELD(0x10,	0x1,			31),
	.ndiv		= CLKGEN_FIELD(0xC,	C32_NDIV_MASK,		0x0),
	.idf		= CLKGEN_FIELD(0x10,	C32_IDF_MASK,		0x0),
	.num_odfs = 4,
	.odf = {	CLKGEN_FIELD(0x58,	C32_ODF_MASK,		4),
			CLKGEN_FIELD(0x58,	C32_ODF_MASK,		10),
			CLKGEN_FIELD(0x58,	C32_ODF_MASK,		16),
			CLKGEN_FIELD(0x58,	C32_ODF_MASK,		22) },
	.odf_gate = {	CLKGEN_FIELD(0x58,	0x1,			0),
			CLKGEN_FIELD(0x58,	0x1,			1),
			CLKGEN_FIELD(0x58,	0x1,			2),
			CLKGEN_FIELD(0x58,	0x1,			3) },
	.ops		= &stm_pll3200c32_ops,
};

/* 415 specific */
static struct clkgen_pll_data st_pll3200c32_a9_415 = {
	.pdn_status	= CLKGEN_FIELD(0x0,	0x1,			0),
	.locked_status	= CLKGEN_FIELD(0x6C,	0x1,			0),
	.ndiv		= CLKGEN_FIELD(0x0,	C32_NDIV_MASK,		9),
	.idf		= CLKGEN_FIELD(0x0,	C32_IDF_MASK,		22),
	.num_odfs = 1,
	.odf =		{ CLKGEN_FIELD(0x0,	C32_ODF_MASK,		3) },
	.odf_gate =	{ CLKGEN_FIELD(0x0,	0x1,			28) },
	.ops		= &stm_pll3200c32_ops,
};

static struct clkgen_pll_data st_pll3200c32_ddr_415 = {
	.pdn_status	= CLKGEN_FIELD(0x0,	0x1,			0),
	.locked_status	= CLKGEN_FIELD(0x100,	0x1,			0),
	.ndiv		= CLKGEN_FIELD(0x8,	C32_NDIV_MASK,		0),
	.idf		= CLKGEN_FIELD(0x0,	C32_IDF_MASK,		25),
	.num_odfs = 2,
	.odf		= { CLKGEN_FIELD(0x8,	C32_ODF_MASK,		8),
			    CLKGEN_FIELD(0x8,	C32_ODF_MASK,		14) },
	.odf_gate	= { CLKGEN_FIELD(0x4,	0x1,			28),
			    CLKGEN_FIELD(0x4,	0x1,			29) },
	.ops		= &stm_pll3200c32_ops,
};

static struct clkgen_pll_data st_pll1200c32_gpu_415 = {
	.pdn_status	= CLKGEN_FIELD(0x144,	0x1,			3),
	.locked_status	= CLKGEN_FIELD(0x168,	0x1,			0),
	.ldf		= CLKGEN_FIELD(0x0,	C32_LDF_MASK,		3),
	.idf		= CLKGEN_FIELD(0x0,	C32_IDF_MASK,		0),
	.num_odfs = 0,
	.odf		= { CLKGEN_FIELD(0x0,	C32_ODF_MASK,		10) },
	.ops		= &st_pll1200c32_ops,
};

/* 416 specific */
static struct clkgen_pll_data st_pll3200c32_a9_416 = {
	.pdn_status	= CLKGEN_FIELD(0x0,	0x1,			0),
	.locked_status	= CLKGEN_FIELD(0x6C,	0x1,			0),
	.ndiv		= CLKGEN_FIELD(0x8,	C32_NDIV_MASK,		0),
	.idf		= CLKGEN_FIELD(0x0,	C32_IDF_MASK,		25),
	.num_odfs = 1,
	.odf		= { CLKGEN_FIELD(0x8,	C32_ODF_MASK,		8) },
	.odf_gate	= { CLKGEN_FIELD(0x4,	0x1,			28) },
	.ops		= &stm_pll3200c32_ops,
};

static struct clkgen_pll_data st_pll3200c32_ddr_416 = {
	.pdn_status	= CLKGEN_FIELD(0x0,	0x1,			0),
	.locked_status	= CLKGEN_FIELD(0x10C,	0x1,			0),
	.ndiv		= CLKGEN_FIELD(0x8,	C32_NDIV_MASK,		0),
	.idf		= CLKGEN_FIELD(0x0,	C32_IDF_MASK,		25),
	.num_odfs = 2,
	.odf		= { CLKGEN_FIELD(0x8,	C32_ODF_MASK,		8),
			    CLKGEN_FIELD(0x8,	C32_ODF_MASK,		14) },
	.odf_gate	= { CLKGEN_FIELD(0x4,	0x1,			28),
			    CLKGEN_FIELD(0x4,	0x1,			29) },
	.ops		= &stm_pll3200c32_ops,
};

static struct clkgen_pll_data st_pll1200c32_gpu_416 = {
	.pdn_status	= CLKGEN_FIELD(0x8E4,	0x1,			3),
	.locked_status	= CLKGEN_FIELD(0x90C,	0x1,			0),
	.ldf		= CLKGEN_FIELD(0x0,	C32_LDF_MASK,		3),
	.idf		= CLKGEN_FIELD(0x0,	C32_IDF_MASK,		0),
	.num_odfs = 0,
	.odf		= { CLKGEN_FIELD(0x0,	C32_ODF_MASK,		10) },
	.ops		= &st_pll1200c32_ops,
};

/**
 * DOC: Clock Generated by PLL, rate set and enabled by bootloader
 *
 * Traits of this clock:
 * prepare - clk_(un)prepare only ensures parent is (un)prepared
 * enable - clk_enable/disable only ensures parent is enabled
 * rate - rate is fixed. No clk_set_rate support
 * parent - fixed parent. 
No clk_set_parent support + */ + +/** + * PLL clock that is integrated in the ClockGenA instances on the STiH415 + * and STiH416. + * + * @hw: handle between common and hardware-specific interfaces. + * @type: PLL instance type. + * @regs_base: base of the PLL configuration register(s). + * + */ +struct clkgen_pll { +	struct clk_hw		hw; +	struct clkgen_pll_data	*data; +	void __iomem		*regs_base; +}; + +#define to_clkgen_pll(_hw) container_of(_hw, struct clkgen_pll, hw) + +static int clkgen_pll_is_locked(struct clk_hw *hw) +{ +	struct clkgen_pll *pll = to_clkgen_pll(hw); +	u32 locked = CLKGEN_READ(pll, locked_status); + +	return !!locked; +} + +static int clkgen_pll_is_enabled(struct clk_hw *hw) +{ +	struct clkgen_pll *pll = to_clkgen_pll(hw); +	u32 poweroff = CLKGEN_READ(pll, pdn_status); +	return !poweroff; +} + +unsigned long recalc_stm_pll800c65(struct clk_hw *hw, +		unsigned long parent_rate) +{ +	struct clkgen_pll *pll = to_clkgen_pll(hw); +	unsigned long mdiv, ndiv, pdiv; +	unsigned long rate; +	uint64_t res; + +	if (!clkgen_pll_is_enabled(hw) || !clkgen_pll_is_locked(hw)) +		return 0; + +	pdiv = CLKGEN_READ(pll, pdiv); +	mdiv = CLKGEN_READ(pll, mdiv); +	ndiv = CLKGEN_READ(pll, ndiv); + +	if (!mdiv) +		mdiv++; /* mdiv=0 or 1 => MDIV=1 */ + +	res = (uint64_t)2 * (uint64_t)parent_rate * (uint64_t)ndiv; +	rate = (unsigned long)div64_u64(res, mdiv * (1 << pdiv)); + +	pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate); + +	return rate; + +} + +unsigned long recalc_stm_pll1600c65(struct clk_hw *hw, +		unsigned long parent_rate) +{ +	struct clkgen_pll *pll = to_clkgen_pll(hw); +	unsigned long mdiv, ndiv; +	unsigned long rate; + +	if (!clkgen_pll_is_enabled(hw) || !clkgen_pll_is_locked(hw)) +		return 0; + +	mdiv = CLKGEN_READ(pll, mdiv); +	ndiv = CLKGEN_READ(pll, ndiv); + +	if (!mdiv) +		mdiv = 1; + +	/* Note: input is divided by 1000 to avoid overflow */ +	rate = ((2 * (parent_rate / 1000) * ndiv) / mdiv) * 1000; + +	pr_debug("%s:%s rate %lu\n", 
__clk_get_name(hw->clk), __func__, rate); + +	return rate; +} + +unsigned long recalc_stm_pll3200c32(struct clk_hw *hw, +		unsigned long parent_rate) +{ +	struct clkgen_pll *pll = to_clkgen_pll(hw); +	unsigned long ndiv, idf; +	unsigned long rate = 0; + +	if (!clkgen_pll_is_enabled(hw) || !clkgen_pll_is_locked(hw)) +		return 0; + +	ndiv = CLKGEN_READ(pll, ndiv); +	idf = CLKGEN_READ(pll, idf); + +	if (idf) +		/* Note: input is divided to avoid overflow */ +		rate = ((2 * (parent_rate/1000) * ndiv) / idf) * 1000; + +	pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate); + +	return rate; +} + +unsigned long recalc_stm_pll1200c32(struct clk_hw *hw, +		unsigned long parent_rate) +{ +	struct clkgen_pll *pll = to_clkgen_pll(hw); +	unsigned long odf, ldf, idf; +	unsigned long rate; + +	if (!clkgen_pll_is_enabled(hw) || !clkgen_pll_is_locked(hw)) +		return 0; + +	odf = CLKGEN_READ(pll, odf[0]); +	ldf = CLKGEN_READ(pll, ldf); +	idf = CLKGEN_READ(pll, idf); + +	if (!idf) /* idf==0 means 1 */ +		idf = 1; +	if (!odf) /* odf==0 means 1 */ +		odf = 1; + +	/* Note: input is divided by 1000 to avoid overflow */ +	rate = (((parent_rate / 1000) * ldf) / (odf * idf)) * 1000; + +	pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate); + +	return rate; +} + +static const struct clk_ops st_pll1600c65_ops = { +	.is_enabled	= clkgen_pll_is_enabled, +	.recalc_rate	= recalc_stm_pll1600c65, +}; + +static const struct clk_ops st_pll800c65_ops = { +	.is_enabled	= clkgen_pll_is_enabled, +	.recalc_rate	= recalc_stm_pll800c65, +}; + +static const struct clk_ops stm_pll3200c32_ops = { +	.is_enabled	= clkgen_pll_is_enabled, +	.recalc_rate	= recalc_stm_pll3200c32, +}; + +static const struct clk_ops st_pll1200c32_ops = { +	.is_enabled	= clkgen_pll_is_enabled, +	.recalc_rate	= recalc_stm_pll1200c32, +}; + +static struct clk * __init clkgen_pll_register(const char *parent_name, +				struct clkgen_pll_data	*pll_data, +				void __iomem *reg, +				const char *clk_name) +{ +	
struct clkgen_pll *pll; +	struct clk *clk; +	struct clk_init_data init; + +	pll = kzalloc(sizeof(*pll), GFP_KERNEL); +	if (!pll) +		return ERR_PTR(-ENOMEM); + +	init.name = clk_name; +	init.ops = pll_data->ops; + +	init.flags = CLK_IS_BASIC; +	init.parent_names = &parent_name; +	init.num_parents  = 1; + +	pll->data = pll_data; +	pll->regs_base = reg; +	pll->hw.init = &init; + +	clk = clk_register(NULL, &pll->hw); +	if (IS_ERR(clk)) { +		kfree(pll); +		return clk; +	} + +	pr_debug("%s: parent %s rate %lu\n", +			__clk_get_name(clk), +			__clk_get_name(clk_get_parent(clk)), +			clk_get_rate(clk)); + +	return clk; +} + +static struct clk * __init clkgen_c65_lsdiv_register(const char *parent_name, +						     const char *clk_name) +{ +	struct clk *clk; + +	clk = clk_register_fixed_factor(NULL, clk_name, parent_name, 0, 1, 2); +	if (IS_ERR(clk)) +		return clk; + +	pr_debug("%s: parent %s rate %lu\n", +			__clk_get_name(clk), +			__clk_get_name(clk_get_parent(clk)), +			clk_get_rate(clk)); +	return clk; +} + +static void __iomem * __init clkgen_get_register_base( +				struct device_node *np) +{ +	struct device_node *pnode; +	void __iomem *reg = NULL; + +	pnode = of_get_parent(np); +	if (!pnode) +		return NULL; + +	reg = of_iomap(pnode, 0); + +	of_node_put(pnode); +	return reg; +} + +#define CLKGENAx_PLL0_OFFSET 0x0 +#define CLKGENAx_PLL1_OFFSET 0x4 + +static void __init clkgena_c65_pll_setup(struct device_node *np) +{ +	const int num_pll_outputs = 3; +	struct clk_onecell_data *clk_data; +	const char *parent_name; +	void __iomem *reg; +	const char *clk_name; + +	parent_name = of_clk_get_parent_name(np, 0); +	if (!parent_name) +		return; + +	reg = clkgen_get_register_base(np); +	if (!reg) +		return; + +	clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); +	if (!clk_data) +		return; + +	clk_data->clk_num = num_pll_outputs; +	clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *), +				 GFP_KERNEL); + +	if (!clk_data->clks) +		goto err; + +	if 
(of_property_read_string_index(np, "clock-output-names", +					  0, &clk_name)) +		goto err; + +	/* +	 * PLL0 HS (high speed) output +	 */ +	clk_data->clks[0] = clkgen_pll_register(parent_name, +						&st_pll1600c65_ax, +						reg + CLKGENAx_PLL0_OFFSET, +						clk_name); + +	if (IS_ERR(clk_data->clks[0])) +		goto err; + +	if (of_property_read_string_index(np, "clock-output-names", +					  1, &clk_name)) +		goto err; + +	/* +	 * PLL0 LS (low speed) output, which is a fixed divide by 2 of the +	 * high speed output. +	 */ +	clk_data->clks[1] = clkgen_c65_lsdiv_register(__clk_get_name +						      (clk_data->clks[0]), +						      clk_name); + +	if (IS_ERR(clk_data->clks[1])) +		goto err; + +	if (of_property_read_string_index(np, "clock-output-names", +					  2, &clk_name)) +		goto err; + +	/* +	 * PLL1 output +	 */ +	clk_data->clks[2] = clkgen_pll_register(parent_name, +						&st_pll800c65_ax, +						reg + CLKGENAx_PLL1_OFFSET, +						clk_name); + +	if (IS_ERR(clk_data->clks[2])) +		goto err; + +	of_clk_add_provider(np, of_clk_src_onecell_get, clk_data); +	return; + +err: +	kfree(clk_data->clks); +	kfree(clk_data); +} +CLK_OF_DECLARE(clkgena_c65_plls, +	       "st,clkgena-plls-c65", clkgena_c65_pll_setup); + +static struct clk * __init clkgen_odf_register(const char *parent_name, +					       void * __iomem reg, +					       struct clkgen_pll_data *pll_data, +					       int odf, +					       spinlock_t *odf_lock, +					       const char *odf_name) +{ +	struct clk *clk; +	unsigned long flags; +	struct clk_gate *gate; +	struct clk_divider *div; + +	flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_GATE; + +	gate = kzalloc(sizeof(*gate), GFP_KERNEL); +	if (!gate) +		return ERR_PTR(-ENOMEM); + +	gate->flags = CLK_GATE_SET_TO_DISABLE; +	gate->reg = reg + pll_data->odf_gate[odf].offset; +	gate->bit_idx = pll_data->odf_gate[odf].shift; +	gate->lock = odf_lock; + +	div = kzalloc(sizeof(*div), GFP_KERNEL); +	if (!div) { +		kfree(gate); +		return ERR_PTR(-ENOMEM); +	} + +	
div->flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO; +	div->reg = reg + pll_data->odf[odf].offset; +	div->shift = pll_data->odf[odf].shift; +	div->width = fls(pll_data->odf[odf].mask); +	div->lock = odf_lock; + +	clk = clk_register_composite(NULL, odf_name, &parent_name, 1, +				     NULL, NULL, +				     &div->hw, &clk_divider_ops, +				     &gate->hw, &clk_gate_ops, +				     flags); +	if (IS_ERR(clk)) +		return clk; + +	pr_debug("%s: parent %s rate %lu\n", +			__clk_get_name(clk), +			__clk_get_name(clk_get_parent(clk)), +			clk_get_rate(clk)); +	return clk; +} + +static struct of_device_id c32_pll_of_match[] = { +	{ +		.compatible = "st,plls-c32-a1x-0", +		.data = &st_pll3200c32_a1x_0, +	}, +	{ +		.compatible = "st,plls-c32-a1x-1", +		.data = &st_pll3200c32_a1x_1, +	}, +	{ +		.compatible = "st,stih415-plls-c32-a9", +		.data = &st_pll3200c32_a9_415, +	}, +	{ +		.compatible = "st,stih415-plls-c32-ddr", +		.data = &st_pll3200c32_ddr_415, +	}, +	{ +		.compatible = "st,stih416-plls-c32-a9", +		.data = &st_pll3200c32_a9_416, +	}, +	{ +		.compatible = "st,stih416-plls-c32-ddr", +		.data = &st_pll3200c32_ddr_416, +	}, +	{} +}; + +static void __init clkgen_c32_pll_setup(struct device_node *np) +{ +	const struct of_device_id *match; +	struct clk *clk; +	const char *parent_name, *pll_name; +	void __iomem *pll_base; +	int num_odfs, odf; +	struct clk_onecell_data *clk_data; +	struct clkgen_pll_data	*data; + +	match = of_match_node(c32_pll_of_match, np); +	if (!match) { +		pr_err("%s: No matching data\n", __func__); +		return; +	} + +	data = (struct clkgen_pll_data *) match->data; + +	parent_name = of_clk_get_parent_name(np, 0); +	if (!parent_name) +		return; + +	pll_base = clkgen_get_register_base(np); +	if (!pll_base) +		return; + +	clk = clkgen_pll_register(parent_name, data, pll_base, np->name); +	if (IS_ERR(clk)) +		return; + +	pll_name = __clk_get_name(clk); + +	num_odfs = data->num_odfs; + +	clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); +	if (!clk_data) +	
	return; + +	clk_data->clk_num = num_odfs; +	clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *), +				 GFP_KERNEL); + +	if (!clk_data->clks) +		goto err; + +	for (odf = 0; odf < num_odfs; odf++) { +		struct clk *clk; +		const char *clk_name; + +		if (of_property_read_string_index(np, "clock-output-names", +						  odf, &clk_name)) +			return; + +		clk = clkgen_odf_register(pll_name, pll_base, data, +				odf, &clkgena_c32_odf_lock, clk_name); +		if (IS_ERR(clk)) +			goto err; + +		clk_data->clks[odf] = clk; +	} + +	of_clk_add_provider(np, of_clk_src_onecell_get, clk_data); +	return; + +err: +	kfree(pll_name); +	kfree(clk_data->clks); +	kfree(clk_data); +} +CLK_OF_DECLARE(clkgen_c32_pll, "st,clkgen-plls-c32", clkgen_c32_pll_setup); + +static struct of_device_id c32_gpu_pll_of_match[] = { +	{ +		.compatible = "st,stih415-gpu-pll-c32", +		.data = &st_pll1200c32_gpu_415, +	}, +	{ +		.compatible = "st,stih416-gpu-pll-c32", +		.data = &st_pll1200c32_gpu_416, +	}, +	{} +}; + +static void __init clkgengpu_c32_pll_setup(struct device_node *np) +{ +	const struct of_device_id *match; +	struct clk *clk; +	const char *parent_name; +	void __iomem *reg; +	const char *clk_name; +	struct clkgen_pll_data	*data; + +	match = of_match_node(c32_gpu_pll_of_match, np); +	if (!match) { +		pr_err("%s: No matching data\n", __func__); +		return; +	} + +	data = (struct clkgen_pll_data *)match->data; + +	parent_name = of_clk_get_parent_name(np, 0); +	if (!parent_name) +		return; + +	reg = clkgen_get_register_base(np); +	if (!reg) +		return; + +	if (of_property_read_string_index(np, "clock-output-names", +					  0, &clk_name)) +		return; + +	/* +	 * PLL 1200MHz output +	 */ +	clk = clkgen_pll_register(parent_name, data, reg, clk_name); + +	if (!IS_ERR(clk)) +		of_clk_add_provider(np, of_clk_src_simple_get, clk); + +	return; +} +CLK_OF_DECLARE(clkgengpu_c32_pll, +	       "st,clkgengpu-pll-c32", clkgengpu_c32_pll_setup); diff --git a/drivers/clk/st/clkgen.h b/drivers/clk/st/clkgen.h 
new file mode 100644 index 00000000000..35c86329526 --- /dev/null +++ b/drivers/clk/st/clkgen.h @@ -0,0 +1,48 @@ +/************************************************************************ +File  : Clock H/w specific Information + +Author: Pankaj Dev <pankaj.dev@st.com> + +Copyright (C) 2014 STMicroelectronics +************************************************************************/ + +#ifndef __CLKGEN_INFO_H +#define __CLKGEN_INFO_H + +struct clkgen_field { +	unsigned int offset; +	unsigned int mask; +	unsigned int shift; +}; + +static inline unsigned long clkgen_read(void __iomem	*base, +					  struct clkgen_field *field) +{ +	return (readl(base + field->offset) >> field->shift) & field->mask; +} + + +static inline void clkgen_write(void __iomem *base, struct clkgen_field *field, +				  unsigned long val) +{ +	writel((readl(base + field->offset) & +	       ~(field->mask << field->shift)) | (val << field->shift), +	       base + field->offset); + +	return; +} + +#define CLKGEN_FIELD(_offset, _mask, _shift) {		\ +				.offset	= _offset,	\ +				.mask	= _mask,	\ +				.shift	= _shift,	\ +				} + +#define CLKGEN_READ(pll, field) clkgen_read(pll->regs_base, \ +		&pll->data->field) + +#define CLKGEN_WRITE(pll, field, val) clkgen_write(pll->regs_base, \ +		&pll->data->field, val) + +#endif /*__CLKGEN_INFO_H*/ +  | 
