Diffstat (limited to 'drivers/sh/clk')
-rw-r--r--	drivers/sh/clk/core.c	243
-rw-r--r--	drivers/sh/clk/cpg.c	525
2 files changed, 407 insertions, 361 deletions
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c
index 09615b51d59..be56b22ca94 100644
--- a/drivers/sh/clk/core.c
+++ b/drivers/sh/clk/core.c
@@ -21,12 +21,10 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/list.h>
-#include <linux/kobject.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
 #include <linux/seq_file.h>
 #include <linux/err.h>
 #include <linux/io.h>
-#include <linux/debugfs.h>
 #include <linux/cpufreq.h>
 #include <linux/clk.h>
 #include <linux/sh_clk.h>
@@ -35,6 +33,9 @@
 static LIST_HEAD(clock_list);
 static DEFINE_SPINLOCK(clock_lock);
 static DEFINE_MUTEX(clock_list_sem);
+/* clock disable operations are not passed on to hardware during boot */
+static int allow_disable;
+
 void clk_rate_table_build(struct clk *clk,
 			  struct cpufreq_frequency_table *freq_table,
 			  int nr_freqs,
@@ -62,12 +63,12 @@ void clk_rate_table_build(struct clk *clk,
 		else
 			freq = clk->parent->rate * mult / div;
 
-		freq_table[i].index = i;
+		freq_table[i].driver_data = i;
 		freq_table[i].frequency = freq;
 	}
 
 	/* Termination entry */
-	freq_table[i].index = i;
+	freq_table[i].driver_data = i;
 	freq_table[i].frequency = CPUFREQ_TABLE_END;
 }
 
@@ -171,21 +172,35 @@ long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
 	return clk_rate_round_helper(&div_range_round);
 }
 
+static long clk_rate_mult_range_iter(unsigned int pos,
+				     struct clk_rate_round_data *rounder)
+{
+	return clk_get_rate(rounder->arg) * pos;
+}
+
+long clk_rate_mult_range_round(struct clk *clk, unsigned int mult_min,
+			       unsigned int mult_max, unsigned long rate)
+{
+	struct clk_rate_round_data mult_range_round = {
+		.min	= mult_min,
+		.max	= mult_max,
+		.func	= clk_rate_mult_range_iter,
+		.arg	= clk_get_parent(clk),
+		.rate	= rate,
+	};
+
+	return clk_rate_round_helper(&mult_range_round);
+}
+
 int clk_rate_table_find(struct clk *clk,
 			struct cpufreq_frequency_table *freq_table,
 			unsigned long rate)
 {
-	int i;
+	struct cpufreq_frequency_table *pos;
 
-	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
-		unsigned long freq = freq_table[i].frequency;
-
-		if (freq == CPUFREQ_ENTRY_INVALID)
-			continue;
-
-		if (freq == rate)
-			return i;
-	}
+	cpufreq_for_each_valid_entry(pos, freq_table)
+		if (pos->frequency == rate)
+			return pos - freq_table;
 
 	return -ENOENT;
 }
@@ -203,9 +218,6 @@ int clk_reparent(struct clk *child, struct clk *parent)
 		list_add(&child->sibling, &parent->children);
 	child->parent = parent;
 
-	/* now do the debugfs renaming to reattach the child
-	   to the proper parent */
-
 	return 0;
 }
 
@@ -229,7 +241,7 @@ static void __clk_disable(struct clk *clk)
 		return;
 
 	if (!(--clk->usecount)) {
-		if (likely(clk->ops && clk->ops->disable))
+		if (likely(allow_disable && clk->ops && clk->ops->disable))
 			clk->ops->disable(clk);
 		if (likely(clk->parent))
 			__clk_disable(clk->parent);
@@ -337,7 +349,7 @@ static int clk_establish_mapping(struct clk *clk)
 	 */
 	if (!clk->parent) {
 		clk->mapping = &dummy_mapping;
-		return 0;
+		goto out;
 	}
 
 	/*
@@ -366,6 +378,9 @@ static int clk_establish_mapping(struct clk *clk)
 	}
 
 	clk->mapping = mapping;
+out:
+	clk->mapped_reg = clk->mapping->base;
+	clk->mapped_reg += (phys_addr_t)clk->enable_reg - clk->mapping->phys;
 	return 0;
 }
 
@@ -384,17 +399,19 @@ static void clk_teardown_mapping(struct clk *clk)
 
 	/* Nothing to do */
 	if (mapping == &dummy_mapping)
-		return;
+		goto out;
 
 	kref_put(&mapping->ref, clk_destroy_mapping);
 	clk->mapping = NULL;
+out:
+	clk->mapped_reg = NULL;
 }
 
 int clk_register(struct clk *clk)
 {
 	int ret;
 
-	if (clk == NULL || IS_ERR(clk))
+	if (IS_ERR_OR_NULL(clk))
 		return -EINVAL;
 
 	/*
@@ -418,8 +435,11 @@ int clk_register(struct clk *clk)
 		list_add(&clk->sibling, &root_clks);
 
 	list_add(&clk->node, &clock_list);
+
+#ifdef CONFIG_SH_CLK_CPG_LEGACY
 	if (clk->ops && clk->ops->init)
 		clk->ops->init(clk);
+#endif
 
 out_unlock:
 	mutex_unlock(&clock_list_sem);
@@ -455,19 +475,13 @@ EXPORT_SYMBOL_GPL(clk_get_rate);
 
 int clk_set_rate(struct clk *clk, unsigned long rate)
 {
-	return clk_set_rate_ex(clk, rate, 0);
-}
-EXPORT_SYMBOL_GPL(clk_set_rate);
-
-int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
-{
 	int ret = -EOPNOTSUPP;
 	unsigned long flags;
 
 	spin_lock_irqsave(&clock_lock, flags);
 
 	if (likely(clk->ops && clk->ops->set_rate)) {
-		ret = clk->ops->set_rate(clk, rate, algo_id);
+		ret = clk->ops->set_rate(clk, rate);
 		if (ret != 0)
 			goto out_unlock;
 	} else {
@@ -485,7 +499,7 @@ out_unlock:
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(clk_set_rate_ex);
+EXPORT_SYMBOL_GPL(clk_set_rate);
 
 int clk_set_parent(struct clk *clk, struct clk *parent)
 {
@@ -555,11 +569,7 @@ long clk_round_parent(struct clk *clk, unsigned long target,
 		return abs(target - *best_freq);
 	}
 
-	for (freq = parent->freq_table; freq->frequency != CPUFREQ_TABLE_END;
-	     freq++) {
-		if (freq->frequency == CPUFREQ_ENTRY_INVALID)
-			continue;
-
+	cpufreq_for_each_valid_entry(freq, parent->freq_table) {
 		if (unlikely(freq->frequency / target <= div_min - 1)) {
 			unsigned long freq_max;
 
@@ -571,7 +581,7 @@ long clk_round_parent(struct clk *clk, unsigned long target,
 				*best_freq = freq_max;
 			}
 
-			pr_debug("too low freq %lu, error %lu\n", freq->frequency,
+			pr_debug("too low freq %u, error %lu\n", freq->frequency,
 				 target - freq_max);
 
 			if (!error)
@@ -591,7 +601,7 @@ long clk_round_parent(struct clk *clk, unsigned long target,
 				*best_freq = freq_min;
 			}
 
-			pr_debug("too high freq %lu, error %lu\n", freq->frequency,
+			pr_debug("too high freq %u, error %lu\n", freq->frequency,
 				 freq_min - target);
 
 			if (!error)
@@ -634,153 +644,56 @@ long clk_round_parent(struct clk *clk, unsigned long target,
 EXPORT_SYMBOL_GPL(clk_round_parent);
 
 #ifdef CONFIG_PM
-static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
+static void clks_core_resume(void)
 {
-	static pm_message_t prev_state;
 	struct clk *clkp;
 
-	switch (state.event) {
-	case PM_EVENT_ON:
-		/* Resumeing from hibernation */
-		if (prev_state.event != PM_EVENT_FREEZE)
-			break;
-
-		list_for_each_entry(clkp, &clock_list, node) {
-			if (likely(clkp->ops)) {
-				unsigned long rate = clkp->rate;
-
-				if (likely(clkp->ops->set_parent))
-					clkp->ops->set_parent(clkp,
-						clkp->parent);
-				if (likely(clkp->ops->set_rate))
-					clkp->ops->set_rate(clkp,
-						rate, NO_CHANGE);
-				else if (likely(clkp->ops->recalc))
-					clkp->rate = clkp->ops->recalc(clkp);
-			}
+	list_for_each_entry(clkp, &clock_list, node) {
+		if (likely(clkp->usecount && clkp->ops)) {
+			unsigned long rate = clkp->rate;
+
+			if (likely(clkp->ops->set_parent))
+				clkp->ops->set_parent(clkp,
+					clkp->parent);
+			if (likely(clkp->ops->set_rate))
+				clkp->ops->set_rate(clkp, rate);
+			else if (likely(clkp->ops->recalc))
+				clkp->rate = clkp->ops->recalc(clkp);
 		}
-		break;
-	case PM_EVENT_FREEZE:
-		break;
-	case PM_EVENT_SUSPEND:
-		break;
 	}
-
-	prev_state = state;
-	return 0;
-}
-
-static int clks_sysdev_resume(struct sys_device *dev)
-{
-	return clks_sysdev_suspend(dev, PMSG_ON);
 }
 
-static struct sysdev_class clks_sysdev_class = {
-	.name = "clks",
-};
-
-static struct sysdev_driver clks_sysdev_driver = {
-	.suspend = clks_sysdev_suspend,
-	.resume = clks_sysdev_resume,
-};
-
-static struct sys_device clks_sysdev_dev = {
-	.cls = &clks_sysdev_class,
+static struct syscore_ops clks_syscore_ops = {
+	.resume = clks_core_resume,
 };
 
-static int __init clk_sysdev_init(void)
+static int __init clk_syscore_init(void)
 {
-	sysdev_class_register(&clks_sysdev_class);
-	sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
-	sysdev_register(&clks_sysdev_dev);
+	register_syscore_ops(&clks_syscore_ops);
 
 	return 0;
 }
-subsys_initcall(clk_sysdev_init);
+subsys_initcall(clk_syscore_init);
 #endif
 
-/*
- * debugfs support to trace clock tree hierarchy and attributes
- */
-static struct dentry *clk_debugfs_root;
-
-static int clk_debugfs_register_one(struct clk *c)
-{
-	int err;
-	struct dentry *d, *child, *child_tmp;
-	struct clk *pa = c->parent;
-	char s[255];
-	char *p = s;
-
-	p += sprintf(p, "%p", c);
-	d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
-	if (!d)
-		return -ENOMEM;
-	c->dentry = d;
-
-	d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
-	if (!d) {
-		err = -ENOMEM;
-		goto err_out;
-	}
-	d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
-	if (!d) {
-		err = -ENOMEM;
-		goto err_out;
-	}
-	d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
-	if (!d) {
-		err = -ENOMEM;
-		goto err_out;
-	}
-	return 0;
-
-err_out:
-	d = c->dentry;
-	list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
-		debugfs_remove(child);
-	debugfs_remove(c->dentry);
-	return err;
-}
-
-static int clk_debugfs_register(struct clk *c)
+static int __init clk_late_init(void)
 {
-	int err;
-	struct clk *pa = c->parent;
-
-	if (pa && !pa->dentry) {
-		err = clk_debugfs_register(pa);
-		if (err)
-			return err;
-	}
+	unsigned long flags;
	struct clk *clk;
 
-	if (!c->dentry) {
-		err = clk_debugfs_register_one(c);
-		if (err)
-			return err;
-	}
-	return 0;
-}
+	/* disable all clocks with zero use count */
+	mutex_lock(&clock_list_sem);
+	spin_lock_irqsave(&clock_lock, flags);
 
-static int __init clk_debugfs_init(void)
-{
-	struct clk *c;
-	struct dentry *d;
-	int err;
+	list_for_each_entry(clk, &clock_list, node)
+		if (!clk->usecount && clk->ops && clk->ops->disable)
+			clk->ops->disable(clk);
 
-	d = debugfs_create_dir("clock", NULL);
-	if (!d)
-		return -ENOMEM;
-	clk_debugfs_root = d;
+	/* from now on allow clock disable operations */
+	allow_disable = 1;
 
-	list_for_each_entry(c, &clock_list, node) {
-		err = clk_debugfs_register(c);
-		if (err)
-			goto err_out;
-	}
+	spin_unlock_irqrestore(&clock_lock, flags);
+	mutex_unlock(&clock_list_sem);
 
 	return 0;
-
-err_out:
-	debugfs_remove_recursive(clk_debugfs_root);
-	return err;
 }
-late_initcall(clk_debugfs_init);
+late_initcall(clk_late_init);
diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c
index 3aea5f0ceb0..7442bc13005 100644
--- a/drivers/sh/clk/cpg.c
+++ b/drivers/sh/clk/cpg.c
@@ -2,6 +2,7 @@
  * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
  *
  * Copyright (C) 2010  Magnus Damm
+ * Copyright (C) 2010 - 2012  Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -13,26 +14,84 @@
 #include <linux/io.h>
 #include <linux/sh_clk.h>
 
-static int sh_clk_mstp32_enable(struct clk *clk)
+#define CPG_CKSTP_BIT	BIT(8)
+
+static unsigned int sh_clk_read(struct clk *clk)
+{
+	if (clk->flags & CLK_ENABLE_REG_8BIT)
+		return ioread8(clk->mapped_reg);
+	else if (clk->flags & CLK_ENABLE_REG_16BIT)
+		return ioread16(clk->mapped_reg);
+
+	return ioread32(clk->mapped_reg);
+}
+
+static void sh_clk_write(int value, struct clk *clk)
+{
+	if (clk->flags & CLK_ENABLE_REG_8BIT)
+		iowrite8(value, clk->mapped_reg);
+	else if (clk->flags & CLK_ENABLE_REG_16BIT)
+		iowrite16(value, clk->mapped_reg);
+	else
+		iowrite32(value, clk->mapped_reg);
+}
+
+static unsigned int r8(const void __iomem *addr)
+{
+	return ioread8(addr);
+}
+
+static unsigned int r16(const void __iomem *addr)
 {
-	__raw_writel(__raw_readl(clk->enable_reg) & ~(1 << clk->enable_bit),
-		     clk->enable_reg);
+	return ioread16(addr);
+}
+
+static unsigned int r32(const void __iomem *addr)
+{
+	return ioread32(addr);
+}
+
+static int sh_clk_mstp_enable(struct clk *clk)
+{
+	sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
+	if (clk->status_reg) {
+		unsigned int (*read)(const void __iomem *addr);
+		int i;
+		void __iomem *mapped_status = (phys_addr_t)clk->status_reg -
+			(phys_addr_t)clk->enable_reg + clk->mapped_reg;
+
+		if (clk->flags & CLK_ENABLE_REG_8BIT)
+			read = r8;
+		else if (clk->flags & CLK_ENABLE_REG_16BIT)
+			read = r16;
+		else
+			read = r32;
+
+		for (i = 1000;
+		     (read(mapped_status) & (1 << clk->enable_bit)) && i;
+		     i--)
+			cpu_relax();
+		if (!i) {
+			pr_err("cpg: failed to enable %p[%d]\n",
+			       clk->enable_reg, clk->enable_bit);
+			return -ETIMEDOUT;
+		}
+	}
 	return 0;
 }
 
-static void sh_clk_mstp32_disable(struct clk *clk)
+static void sh_clk_mstp_disable(struct clk *clk)
 {
-	__raw_writel(__raw_readl(clk->enable_reg) | (1 << clk->enable_bit),
-		     clk->enable_reg);
+	sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
 }
 
-static struct clk_ops sh_clk_mstp32_clk_ops = {
-	.enable		= sh_clk_mstp32_enable,
-	.disable	= sh_clk_mstp32_disable,
+static struct sh_clk_ops sh_clk_mstp_clk_ops = {
+	.enable		= sh_clk_mstp_enable,
+	.disable	= sh_clk_mstp_disable,
 	.recalc		= followparent_recalc,
 };
 
-int __init sh_clk_mstp32_register(struct clk *clks, int nr)
+int __init sh_clk_mstp_register(struct clk *clks, int nr)
 {
 	struct clk *clkp;
 	int ret = 0;
@@ -40,79 +99,50 @@ int __init sh_clk_mstp32_register(struct clk *clks, int nr)
 
 	for (k = 0; !ret && (k < nr); k++) {
 		clkp = clks + k;
-		clkp->ops = &sh_clk_mstp32_clk_ops;
+		clkp->ops = &sh_clk_mstp_clk_ops;
 		ret |= clk_register(clkp);
 	}
 
 	return ret;
 }
 
-static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
+/*
+ * Div/mult table lookup helpers
+ */
+static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
 {
-	return clk_rate_table_round(clk, clk->freq_table, rate);
+	return clk->priv;
 }
 
-static int sh_clk_div6_divisors[64] = {
-	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
-	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
-	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
-	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
-};
+static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
+{
+	return clk_to_div_table(clk)->div_mult_table;
+}
 
-static struct clk_div_mult_table sh_clk_div6_table = {
-	.divisors = sh_clk_div6_divisors,
-	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
-};
+/*
+ * Common div ops
+ */
+static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
+{
+	return clk_rate_table_round(clk, clk->freq_table, rate);
+}
 
-static unsigned long sh_clk_div6_recalc(struct clk *clk)
+static unsigned long sh_clk_div_recalc(struct clk *clk)
 {
-	struct clk_div_mult_table *table = &sh_clk_div6_table;
+	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
 	unsigned int idx;
 
 	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
-			     table, NULL);
+			     table, clk->arch_flags ? &clk->arch_flags : NULL);
 
-	idx = __raw_readl(clk->enable_reg) & 0x003f;
+	idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;
 
 	return clk->freq_table[idx].frequency;
 }
 
-static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
-{
-	struct clk_div_mult_table *table = &sh_clk_div6_table;
-	u32 value;
-	int ret, i;
-
-	if (!clk->parent_table || !clk->parent_num)
-		return -EINVAL;
-
-	/* Search the parent */
-	for (i = 0; i < clk->parent_num; i++)
-		if (clk->parent_table[i] == parent)
-			break;
-
-	if (i == clk->parent_num)
-		return -ENODEV;
-
-	ret = clk_reparent(clk, parent);
-	if (ret < 0)
-		return ret;
-
-	value = __raw_readl(clk->enable_reg) &
-		~(((1 << clk->src_width) - 1) << clk->src_shift);
-
-	__raw_writel(value | (i << clk->src_shift), clk->enable_reg);
-
-	/* Rebuild the frequency table */
-	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
-			     table, &clk->arch_flags);
-
-	return 0;
-}
-
-static int sh_clk_div6_set_rate(struct clk *clk,
-				unsigned long rate, int algo_id)
+static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
 {
+	struct clk_div_table *dt = clk_to_div_table(clk);
 	unsigned long value;
 	int idx;
 
@@ -120,60 +150,100 @@ static int sh_clk_div6_set_rate(struct clk *clk,
 	if (idx < 0)
 		return idx;
 
-	value = __raw_readl(clk->enable_reg);
-	value &= ~0x3f;
-	value |= idx;
-	__raw_writel(value, clk->enable_reg);
+	value = sh_clk_read(clk);
+	value &= ~(clk->div_mask << clk->enable_bit);
+	value |= (idx << clk->enable_bit);
+	sh_clk_write(value, clk);
+
+	/* XXX: Should use a post-change notifier */
+	if (dt->kick)
+		dt->kick(clk);
+
 	return 0;
 }
 
-static int sh_clk_div6_enable(struct clk *clk)
+static int sh_clk_div_enable(struct clk *clk)
 {
-	unsigned long value;
-	int ret;
-
-	ret = sh_clk_div6_set_rate(clk, clk->rate, 0);
-	if (ret == 0) {
-		value = __raw_readl(clk->enable_reg);
-		value &= ~0x100; /* clear stop bit to enable clock */
-		__raw_writel(value, clk->enable_reg);
+	if (clk->div_mask == SH_CLK_DIV6_MSK) {
+		int ret = sh_clk_div_set_rate(clk, clk->rate);
+		if (ret < 0)
+			return ret;
 	}
-	return ret;
+
+	sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
+	return 0;
 }
 
-static void sh_clk_div6_disable(struct clk *clk)
+static void sh_clk_div_disable(struct clk *clk)
 {
-	unsigned long value;
+	unsigned int val;
 
-	value = __raw_readl(clk->enable_reg);
-	value |= 0x100; /* stop clock */
-	value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
-	__raw_writel(value, clk->enable_reg);
+	val = sh_clk_read(clk);
+	val |= CPG_CKSTP_BIT;
+
+	/*
+	 * div6 clocks require the divisor field to be non-zero or the
+	 * above CKSTP toggle silently fails. Ensure that the divisor
+	 * array is reset to its initial state on disable.
+	 */
+	if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
+		val |= clk->div_mask;
+
+	sh_clk_write(val, clk);
 }
 
-static struct clk_ops sh_clk_div6_clk_ops = {
-	.recalc		= sh_clk_div6_recalc,
+static struct sh_clk_ops sh_clk_div_clk_ops = {
+	.recalc		= sh_clk_div_recalc,
+	.set_rate	= sh_clk_div_set_rate,
 	.round_rate	= sh_clk_div_round_rate,
-	.set_rate	= sh_clk_div6_set_rate,
-	.enable		= sh_clk_div6_enable,
-	.disable	= sh_clk_div6_disable,
 };
 
-static struct clk_ops sh_clk_div6_reparent_clk_ops = {
-	.recalc		= sh_clk_div6_recalc,
+static struct sh_clk_ops sh_clk_div_enable_clk_ops = {
+	.recalc		= sh_clk_div_recalc,
+	.set_rate	= sh_clk_div_set_rate,
 	.round_rate	= sh_clk_div_round_rate,
-	.set_rate	= sh_clk_div6_set_rate,
-	.enable		= sh_clk_div6_enable,
-	.disable	= sh_clk_div6_disable,
-	.set_parent	= sh_clk_div6_set_parent,
+	.enable		= sh_clk_div_enable,
+	.disable	= sh_clk_div_disable,
 };
 
-static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
-					   struct clk_ops *ops)
+static int __init sh_clk_init_parent(struct clk *clk)
+{
+	u32 val;
+
+	if (clk->parent)
+		return 0;
+
+	if (!clk->parent_table || !clk->parent_num)
+		return 0;
+
+	if (!clk->src_width) {
+		pr_err("sh_clk_init_parent: cannot select parent clock\n");
+		return -EINVAL;
+	}
+
+	val  = (sh_clk_read(clk) >> clk->src_shift);
+	val &= (1 << clk->src_width) - 1;
+
+	if (val >= clk->parent_num) {
+		pr_err("sh_clk_init_parent: parent table size failed\n");
+		return -EINVAL;
+	}
+
+	clk_reparent(clk, clk->parent_table[val]);
+	if (!clk->parent) {
+		pr_err("sh_clk_init_parent: unable to set parent");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int __init sh_clk_div_register_ops(struct clk *clks, int nr,
+			struct clk_div_table *table, struct sh_clk_ops *ops)
 {
 	struct clk *clkp;
 	void *freq_table;
-	int nr_divs = sh_clk_div6_table.nr_divisors;
+	int nr_divs = table->div_mult_table->nr_divisors;
 	int freq_table_size = sizeof(struct cpufreq_frequency_table);
 	int ret = 0;
 	int k;
@@ -181,7 +251,7 @@ static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
 	freq_table_size *= (nr_divs + 1);
 	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
 	if (!freq_table) {
-		pr_err("sh_clk_div6_register: unable to alloc memory\n");
+		pr_err("%s: unable to alloc memory\n", __func__);
 		return -ENOMEM;
 	}
 
@@ -189,44 +259,98 @@ static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
 		clkp = clks + k;
 
 		clkp->ops = ops;
+		clkp->priv = table;
+
 		clkp->freq_table = freq_table + (k * freq_table_size);
 		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
 
 		ret = clk_register(clkp);
+		if (ret == 0)
+			ret = sh_clk_init_parent(clkp);
 	}
 
 	return ret;
 }
 
-int __init sh_clk_div6_register(struct clk *clks, int nr)
-{
-	return sh_clk_div6_register_ops(clks, nr, &sh_clk_div6_clk_ops);
-}
+/*
+ * div6 support
+ */
+static int sh_clk_div6_divisors[64] = {
+	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
+};
 
-int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
-{
-	return sh_clk_div6_register_ops(clks, nr,
-					&sh_clk_div6_reparent_clk_ops);
-}
+static struct clk_div_mult_table div6_div_mult_table = {
+	.divisors = sh_clk_div6_divisors,
+	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
+};
+
+static struct clk_div_table sh_clk_div6_table = {
+	.div_mult_table	= &div6_div_mult_table,
+};
 
-static unsigned long sh_clk_div4_recalc(struct clk *clk)
+static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
 {
-	struct clk_div4_table *d4t = clk->priv;
-	struct clk_div_mult_table *table = d4t->div_mult_table;
-	unsigned int idx;
+	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
+	u32 value;
+	int ret, i;
+
+	if (!clk->parent_table || !clk->parent_num)
+		return -EINVAL;
+
+	/* Search the parent */
+	for (i = 0; i < clk->parent_num; i++)
+		if (clk->parent_table[i] == parent)
+			break;
 
+	if (i == clk->parent_num)
+		return -ENODEV;
+
+	ret = clk_reparent(clk, parent);
+	if (ret < 0)
+		return ret;
+
+	value = sh_clk_read(clk) &
+		~(((1 << clk->src_width) - 1) << clk->src_shift);
+
+	sh_clk_write(value | (i << clk->src_shift), clk);
+
+	/* Rebuild the frequency table */
 	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
-			     table, &clk->arch_flags);
+			     table, NULL);
 
-	idx = (__raw_readl(clk->enable_reg) >> clk->enable_bit) & 0x000f;
+	return 0;
+}
 
-	return clk->freq_table[idx].frequency;
+static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
+	.recalc		= sh_clk_div_recalc,
+	.round_rate	= sh_clk_div_round_rate,
+	.set_rate	= sh_clk_div_set_rate,
+	.enable		= sh_clk_div_enable,
+	.disable	= sh_clk_div_disable,
+	.set_parent	= sh_clk_div6_set_parent,
+};
+
+int __init sh_clk_div6_register(struct clk *clks, int nr)
+{
+	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
+				       &sh_clk_div_enable_clk_ops);
 }
 
+int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
+{
+	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
+				       &sh_clk_div6_reparent_clk_ops);
+}
+
+/*
+ * div4 support
+ */
 static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
 {
-	struct clk_div4_table *d4t = clk->priv;
-	struct clk_div_mult_table *table = d4t->div_mult_table;
+	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
 	u32 value;
 	int ret;
@@ -236,15 +360,15 @@ static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
 	 */
 
 	if (parent->flags & CLK_ENABLE_ON_INIT)
-		value = __raw_readl(clk->enable_reg) & ~(1 << 7);
+		value = sh_clk_read(clk) & ~(1 << 7);
 	else
-		value = __raw_readl(clk->enable_reg) | (1 << 7);
+		value = sh_clk_read(clk) | (1 << 7);
 
 	ret = clk_reparent(clk, parent);
 	if (ret < 0)
 		return ret;
 
-	__raw_writel(value, clk->enable_reg);
+	sh_clk_write(value, clk);
 
 	/* Rebiuld the frequency table */
 	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
@@ -253,107 +377,116 @@ static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
 	return 0;
 }
 
-static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate, int algo_id)
-{
-	struct clk_div4_table *d4t = clk->priv;
-	unsigned long value;
-	int idx = clk_rate_table_find(clk, clk->freq_table, rate);
-	if (idx < 0)
-		return idx;
-
-	value = __raw_readl(clk->enable_reg);
-	value &= ~(0xf << clk->enable_bit);
-	value |= (idx << clk->enable_bit);
-	__raw_writel(value, clk->enable_reg);
-
-	if (d4t->kick)
-		d4t->kick(clk);
+static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
+	.recalc		= sh_clk_div_recalc,
+	.set_rate	= sh_clk_div_set_rate,
+	.round_rate	= sh_clk_div_round_rate,
+	.enable		= sh_clk_div_enable,
+	.disable	= sh_clk_div_disable,
+	.set_parent	= sh_clk_div4_set_parent,
+};
 
-	return 0;
+int __init sh_clk_div4_register(struct clk *clks, int nr,
+				struct clk_div4_table *table)
+{
+	return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
 }
 
-static int sh_clk_div4_enable(struct clk *clk)
+int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
+				struct clk_div4_table *table)
 {
-	__raw_writel(__raw_readl(clk->enable_reg) & ~(1 << 8), clk->enable_reg);
-	return 0;
+	return sh_clk_div_register_ops(clks, nr, table,
+				       &sh_clk_div_enable_clk_ops);
 }
 
-static void sh_clk_div4_disable(struct clk *clk)
+int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
+				struct clk_div4_table *table)
 {
-	__raw_writel(__raw_readl(clk->enable_reg) | (1 << 8), clk->enable_reg);
+	return sh_clk_div_register_ops(clks, nr, table,
+				       &sh_clk_div4_reparent_clk_ops);
 }
 
-static struct clk_ops sh_clk_div4_clk_ops = {
-	.recalc		= sh_clk_div4_recalc,
-	.set_rate	= sh_clk_div4_set_rate,
-	.round_rate	= sh_clk_div_round_rate,
-};
+/* FSI-DIV */
+static unsigned long fsidiv_recalc(struct clk *clk)
+{
+	u32 value;
 
-static struct clk_ops sh_clk_div4_enable_clk_ops = {
-	.recalc		= sh_clk_div4_recalc,
-	.set_rate	= sh_clk_div4_set_rate,
-	.round_rate	= sh_clk_div_round_rate,
-	.enable		= sh_clk_div4_enable,
-	.disable	= sh_clk_div4_disable,
-};
+	value = __raw_readl(clk->mapping->base);
 
-static struct clk_ops sh_clk_div4_reparent_clk_ops = {
-	.recalc		= sh_clk_div4_recalc,
-	.set_rate	= sh_clk_div4_set_rate,
-	.round_rate	= sh_clk_div_round_rate,
-	.enable		= sh_clk_div4_enable,
-	.disable	= sh_clk_div4_disable,
-	.set_parent	= sh_clk_div4_set_parent,
-};
+	value >>= 16;
+	if (value < 2)
+		return clk->parent->rate;
 
-static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
-			struct clk_div4_table *table, struct clk_ops *ops)
-{
-	struct clk *clkp;
-	void *freq_table;
-	int nr_divs = table->div_mult_table->nr_divisors;
-	int freq_table_size = sizeof(struct cpufreq_frequency_table);
-	int ret = 0;
-	int k;
+	return clk->parent->rate / value;
+}
 
-	freq_table_size *= (nr_divs + 1);
-	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
-	if (!freq_table) {
-		pr_err("sh_clk_div4_register: unable to alloc memory\n");
-		return -ENOMEM;
-	}
+static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
+{
+	return clk_rate_div_range_round(clk, 1, 0xffff, rate);
+}
 
-	for (k = 0; !ret && (k < nr); k++) {
-		clkp = clks + k;
+static void fsidiv_disable(struct clk *clk)
+{
+	__raw_writel(0, clk->mapping->base);
+}
 
-		clkp->ops = ops;
-		clkp->priv = table;
+static int fsidiv_enable(struct clk *clk)
+{
+	u32 value;
 
-		clkp->freq_table = freq_table + (k * freq_table_size);
-		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
+	value = __raw_readl(clk->mapping->base) >> 16;
+	if (value < 2)
+		return 0;
 
-		ret = clk_register(clkp);
-	}
+	__raw_writel((value << 16) | 0x3, clk->mapping->base);
 
-	return ret;
+	return 0;
 }
 
-int __init sh_clk_div4_register(struct clk *clks, int nr,
-				struct clk_div4_table *table)
+static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
 {
-	return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
-}
+	int idx;
 
-int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
-				       struct clk_div4_table *table)
-{
-	return sh_clk_div4_register_ops(clks, nr, table,
-					&sh_clk_div4_enable_clk_ops);
+	idx = (clk->parent->rate / rate) & 0xffff;
+	if (idx < 2)
+		__raw_writel(0, clk->mapping->base);
+	else
+		__raw_writel(idx << 16, clk->mapping->base);
+
+	return 0;
 }
 
-int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
-					 struct clk_div4_table *table)
+static struct sh_clk_ops fsidiv_clk_ops = {
+	.recalc		= fsidiv_recalc,
+	.round_rate	= fsidiv_round_rate,
+	.set_rate	= fsidiv_set_rate,
+	.enable		= fsidiv_enable,
+	.disable	= fsidiv_disable,
+};
+
+int __init sh_clk_fsidiv_register(struct clk *clks, int nr)
 {
-	return sh_clk_div4_register_ops(clks, nr, table,
-					&sh_clk_div4_reparent_clk_ops);
+	struct clk_mapping *map;
+	int i;
+
+	for (i = 0; i < nr; i++) {
+
+		map = kzalloc(sizeof(struct clk_mapping), GFP_KERNEL);
+		if (!map) {
+			pr_err("%s: unable to alloc memory\n", __func__);
+			return -ENOMEM;
+		}
+
+		/* clks[i].enable_reg came from SH_CLK_FSIDIV() */
+		map->phys = (phys_addr_t)clks[i].enable_reg;
+		map->len = 8;
+
+		clks[i].enable_reg = 0; /* remove .enable_reg */
+		clks[i].ops = &fsidiv_clk_ops;
+		clks[i].mapping = map;
+
+		clk_register(&clks[i]);
+	}
+
+	return 0;
 }
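The cpg.c side of this diff replaces every __raw_readl()/__raw_writel() pair with sh_clk_read()/sh_clk_write() helpers that pick an 8-, 16- or 32-bit accessor from per-clock flags. Below is a minimal user-space sketch of that dispatch pattern, assuming invented flag values and a stand-in fake_clk type; the kernel's ioread*/iowrite* accessors are replaced with plain pointer dereferences so the example compiles and runs anywhere.

/*
 * Standalone sketch of the register-width dispatch used by the new
 * sh_clk_read()/sh_clk_write() helpers. The flag encodings below are
 * illustrative, not the ones from <linux/sh_clk.h>.
 */
#include <stdint.h>
#include <stdio.h>

#define CLK_ENABLE_REG_8BIT	(1 << 0)	/* assumed flag values */
#define CLK_ENABLE_REG_16BIT	(1 << 1)

struct fake_clk {
	unsigned long flags;
	void *mapped_reg;
};

static unsigned int fake_clk_read(struct fake_clk *clk)
{
	if (clk->flags & CLK_ENABLE_REG_8BIT)
		return *(volatile uint8_t *)clk->mapped_reg;
	else if (clk->flags & CLK_ENABLE_REG_16BIT)
		return *(volatile uint16_t *)clk->mapped_reg;

	return *(volatile uint32_t *)clk->mapped_reg;
}

static void fake_clk_write(unsigned int value, struct fake_clk *clk)
{
	if (clk->flags & CLK_ENABLE_REG_8BIT)
		*(volatile uint8_t *)clk->mapped_reg = value;
	else if (clk->flags & CLK_ENABLE_REG_16BIT)
		*(volatile uint16_t *)clk->mapped_reg = value;
	else
		*(volatile uint32_t *)clk->mapped_reg = value;
}

int main(void)
{
	uint32_t reg = 0xffffffff;
	struct fake_clk clk = {
		.flags = CLK_ENABLE_REG_16BIT,
		.mapped_reg = &reg,
	};

	/* Clear one enable bit the way sh_clk_mstp_enable() does:
	 * read-modify-write at the register's native width only. */
	fake_clk_write(fake_clk_read(&clk) & ~(1 << 3), &clk);
	printf("reg = 0x%08x\n", (unsigned)reg);	/* 0xfffffff7 on a little-endian host */

	return 0;
}

The point of the pattern is that callers such as the MSTP and div enable/disable paths stay width-agnostic; only the two helpers know about register size.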
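core.c now refuses to propagate clock disables to hardware until late boot: __clk_disable() checks allow_disable, and clk_late_init() both switches off every clock that still has a zero use count and opens the gate. A compressed sketch of that sequencing, with the clock list, locking and ops reduced to a plain array and invented names:

/*
 * Sketch of the boot-time disable gating: hardware disable callbacks
 * are skipped until late init, when unused clocks are swept in one
 * pass. Structure and names are illustrative only.
 */
#include <stdio.h>

struct boot_clk {
	const char *name;
	int usecount;
	void (*disable)(struct boot_clk *clk);
};

static int allow_disable;	/* 0 during early boot, as in core.c */

static void hw_disable(struct boot_clk *clk)
{
	printf("hardware disable: %s\n", clk->name);
}

/* Mirrors __clk_disable(): drop the count, touch hardware only if allowed */
static void clk_put_ref(struct boot_clk *clk)
{
	if (!(--clk->usecount) && allow_disable && clk->disable)
		clk->disable(clk);
}

/* Mirrors clk_late_init(): sweep unused clocks, then open the gate */
static void late_init(struct boot_clk *clks, int nr)
{
	for (int i = 0; i < nr; i++)
		if (!clks[i].usecount && clks[i].disable)
			clks[i].disable(&clks[i]);

	allow_disable = 1;
}

int main(void)
{
	struct boot_clk clks[] = {
		{ "uart", 1, hw_disable },
		{ "unused-dma", 0, hw_disable },
	};

	clk_put_ref(&clks[0]);	/* early boot: count drops, hardware untouched */
	clks[0].usecount = 1;	/* pretend the driver re-enabled it */

	late_init(clks, 2);	/* only "unused-dma" is switched off */
	clk_put_ref(&clks[0]);	/* now the disable reaches "hardware" */

	return 0;
}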
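sh_clk_mstp_enable() additionally gains a bounded busy-wait: when a clock advertises a status register, the enable path polls it up to 1000 times before giving up with -ETIMEDOUT. A standalone approximation of that loop, with the MMIO status register simulated by a variable that clears its bit after a few reads; only the iteration budget and the error path mirror the diff:

/*
 * Sketch of the bounded status poll. A real controller drops the bit
 * on its own; read_status() fakes that so the loop terminates.
 */
#include <stdio.h>

#define ETIMEDOUT 110	/* defined locally to stay self-contained */

static unsigned int status_reg;	/* stands in for the MMIO status register */

static unsigned int read_status(void)
{
	static int reads;
	if (++reads > 3)
		status_reg &= ~(1u << 5);	/* hardware "finishes" */
	return status_reg;
}

static int mstp_enable(int enable_bit)
{
	int i;

	for (i = 1000; (read_status() & (1 << enable_bit)) && i; i--)
		;	/* cpu_relax() in the kernel */

	if (!i) {
		fprintf(stderr, "cpg: failed to enable bit %d\n", enable_bit);
		return -ETIMEDOUT;
	}

	return 0;
}

int main(void)
{
	status_reg = 1u << 5;	/* module initially reports "still stopped" */
	printf("mstp_enable: %d\n", mstp_enable(5));	/* 0 after a few polls */
	return 0;
}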
